59301ddda75f17d50d66a7e143911a2cb41d241d3d945d64e71396c9a60b2ebb
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from astropy.utils.decorators import format_doc
from astropy.coordinates.representation import CartesianRepresentation, CartesianDifferential
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
                                            EarthLocationAttribute)
from .utils import DEFAULT_OBSTIME, EARTH_CENTER

__all__ = ['ITRS']

doc_footer = """
    Other parameters
    ----------------
    obstime : `~astropy.time.Time`
        The time at which the observation is taken.  Used for determining the
        position of the Earth and its precession.
    location : `~astropy.coordinates.EarthLocation`
        The location on the Earth.  This can be specified either as an
        `~astropy.coordinates.EarthLocation` object or as anything that can be
        transformed to an `~astropy.coordinates.ITRS` frame. The default is the
        centre of the Earth.
"""


@format_doc(base_doc, components="", footer=doc_footer)
class ITRS(BaseCoordinateFrame):
    """
    A coordinate or frame in the International Terrestrial Reference System
    (ITRS).  This is approximately a geocentric system, although strictly it is
    defined by a series of reference locations near the surface of the Earth
    (the ITRF).  For more background on the ITRS, see the references provided
    in the :ref:`astropy:astropy-coordinates-seealso` section of the
    documentation.

    This frame also includes frames that are defined *relative* to the center
    of the Earth, but that are offset (in both position and velocity) from the
    center of the Earth. You may see such non-geocentric coordinates referred
    to as "topocentric".

    Topocentric ITRS frames are convenient for observations of near Earth
    objects where stellar aberration is not included. One can merely subtract
    the observing site's EarthLocation geocentric ITRS coordinates from the
    object's geocentric ITRS coordinates, put the resulting vector into a
    topocentric ITRS frame and then transform to `~astropy.coordinates.AltAz`
    or `~astropy.coordinates.HADec`. The other way around is to transform an
    observed `~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`
    position to a topocentric ITRS frame and add the observing site's
    EarthLocation geocentric ITRS coordinates to yield the object's geocentric
    ITRS coordinates.

    On the other hand, using ``transform_to`` to transform geocentric ITRS
    coordinates to topocentric ITRS, observed `~astropy.coordinates.AltAz`, or
    observed `~astropy.coordinates.HADec` coordinates includes the difference
    between stellar aberration from the point of view of an observer at the
    geocenter and stellar aberration from the point of view of an observer on
    the surface of the Earth. If the geocentric ITRS coordinates of the object
    include stellar aberration at the geocenter (e.g. certain ILRS
    ephemerides), then this is the way to go.

    Note to ILRS ephemeris users: Astropy does not currently consider
    relativistic effects of the Earth's gravitational field. Nor do the
    `~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec` refraction
    corrections compute the change in the range due to the curved path of light
    through the atmosphere, so Astropy is no substitute for the ILRS software
    in these respects.
    """

    default_representation = CartesianRepresentation
    default_differential = CartesianDifferential

    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
    location = EarthLocationAttribute(default=EARTH_CENTER)

    @property
    def earth_location(self):
        """
        The data in this frame as an `~astropy.coordinates.EarthLocation` class.
        """
        from astropy.coordinates.earth import EarthLocation

        cart = self.represent_as(CartesianRepresentation)
        return EarthLocation(x=cart.x, y=cart.y, z=cart.z)

# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
e5b90a706af5fc055d8157791f16c14ab97516f52b23b2649ec55851dd03fc7e
import numpy as np
import erfa

from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_transpose
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.representation import CartesianRepresentation

from .altaz import AltAz
from .hadec import HADec
from .itrs import ITRS

# Minimum cos(alt) and sin(alt) for refraction purposes
CELMIN = 1e-6
SELMIN = 0.05
# Latitude of the north pole.
NORTH_POLE = 90.0*u.deg


def itrs_to_altaz_mat(lon, lat):
    # form ITRS to AltAz matrix
    # AltAz frame is left handed
    minus_x = np.eye(3)
    minus_x[0][0] = -1.0
    mat = (minus_x
           @ rotation_matrix(NORTH_POLE - lat, 'y')
           @ rotation_matrix(lon, 'z'))
    return mat


def itrs_to_hadec_mat(lon):
    # form ITRS to HADec matrix
    # HADec frame is left handed
    minus_y = np.eye(3)
    minus_y[1][1] = -1.0
    mat = (minus_y
           @ rotation_matrix(lon, 'z'))
    return mat


def altaz_to_hadec_mat(lat):
    # form AltAz to HADec matrix
    z180 = np.eye(3)
    z180[0][0] = -1.0
    z180[1][1] = -1.0
    mat = (z180
           @ rotation_matrix(NORTH_POLE - lat, 'y'))
    return mat


def add_refraction(aa_crepr, observed_frame):
    # add refraction to AltAz cartesian representation
    refa, refb = erfa.refco(
        observed_frame.pressure.to_value(u.hPa),
        observed_frame.temperature.to_value(u.deg_C),
        observed_frame.relative_humidity.value,
        observed_frame.obswl.to_value(u.micron)
    )
    # reference: erfa.atioq()
    norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
    # Cosine and sine of altitude, with precautions.
    sel = np.maximum(uv[..., 2], SELMIN)
    cel = np.maximum(np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2), CELMIN)
    # A*tan(z)+B*tan^3(z) model, with Newton-Raphson correction.
    tan_z = cel / sel
    w = refb * tan_z ** 2
    delta_el = (refa + w) * tan_z / (1.0 + (refa + 3.0 * w) / (sel ** 2))
    # Apply the change, giving observed vector
    cosdel = 1.0 - 0.5 * delta_el ** 2
    f = cosdel - delta_el * sel / cel
    uv[..., 0] *= f
    uv[..., 1] *= f
    uv[..., 2] = cosdel * uv[..., 2] + delta_el * cel
    # Need to renormalize to get agreement with CIRS->Observed on distance
    norm2, uv = erfa.pn(uv)
    uv = erfa.sxp(norm, uv)
    return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)


def remove_refraction(aa_crepr, observed_frame):
    # remove refraction from AltAz cartesian representation
    refa, refb = erfa.refco(
        observed_frame.pressure.to_value(u.hPa),
        observed_frame.temperature.to_value(u.deg_C),
        observed_frame.relative_humidity.value,
        observed_frame.obswl.to_value(u.micron)
    )
    # reference: erfa.atoiq()
    norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
    # Cosine and sine of altitude, with precautions.
    sel = np.maximum(uv[..., 2], SELMIN)
    cel = np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2)
    # A*tan(z)+B*tan^3(z) model
    tan_z = cel / sel
    delta_el = (refa + refb * tan_z ** 2) * tan_z
    # Apply the change, giving observed vector.
    az, el = erfa.c2s(uv)
    el -= delta_el
    uv = erfa.s2c(az, el)
    uv = erfa.sxp(norm, uv)
    return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)


@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, HADec)
def itrs_to_observed(itrs_coo, observed_frame):
    if (np.any(itrs_coo.location != observed_frame.location) or
            np.any(itrs_coo.obstime != observed_frame.obstime)):
        # This transform will go through the CIRS and alter stellar aberration.
        itrs_coo = itrs_coo.transform_to(ITRS(obstime=observed_frame.obstime,
                                              location=observed_frame.location))

    lon, lat, height = observed_frame.location.to_geodetic('WGS84')

    if isinstance(observed_frame, AltAz) or (observed_frame.pressure > 0.0):
        crepr = itrs_coo.cartesian.transform(itrs_to_altaz_mat(lon, lat))
        if observed_frame.pressure > 0.0:
            crepr = add_refraction(crepr, observed_frame)
            if isinstance(observed_frame, HADec):
                crepr = crepr.transform(altaz_to_hadec_mat(lat))
    else:
        crepr = itrs_coo.cartesian.transform(itrs_to_hadec_mat(lon))
    return observed_frame.realize_frame(crepr)


@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ITRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ITRS)
def observed_to_itrs(observed_coo, itrs_frame):

    lon, lat, height = observed_coo.location.to_geodetic('WGS84')

    if isinstance(observed_coo, AltAz) or (observed_coo.pressure > 0.0):
        crepr = observed_coo.cartesian
        if observed_coo.pressure > 0.0:
            if isinstance(observed_coo, HADec):
                crepr = crepr.transform(matrix_transpose(altaz_to_hadec_mat(lat)))
            crepr = remove_refraction(crepr, observed_coo)
        crepr = crepr.transform(matrix_transpose(itrs_to_altaz_mat(lon, lat)))
    else:
        crepr = observed_coo.cartesian.transform(matrix_transpose(itrs_to_hadec_mat(lon)))

    itrs_at_obs_time = ITRS(crepr, obstime=observed_coo.obstime,
                            location=observed_coo.location)
    # This final transform may be a no-op if the obstimes and locations are the same.
    # Otherwise, this transform will go through the CIRS and alter stellar aberration.
    return itrs_at_obs_time.transform_to(itrs_frame)
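
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module above): transform a
# geocentric ITRS coordinate directly to observed AltAz and HADec frames via
# the transforms registered above.  The position below is arbitrary.  Because
# the input is geocentric while the observed frames are tied to a site, the
# initial ITRS self-transform goes through CIRS and includes the change in
# stellar aberration, as noted in itrs_to_observed().
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import AltAz, CartesianRepresentation, EarthLocation, HADec, ITRS

t = Time('2021-09-01T00:00:00')
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)

itrs_geo = ITRS(CartesianRepresentation(6700 * u.km, 1000 * u.km, 2000 * u.km), obstime=t)

aa = itrs_geo.transform_to(AltAz(obstime=t, location=loc))
hd = itrs_geo.transform_to(HADec(obstime=t, location=loc))

# With a non-zero pressure, add_refraction()/remove_refraction() above are applied.
aa_refr = itrs_geo.transform_to(AltAz(obstime=t, location=loc, pressure=1000 * u.hPa))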
f37dd91e453351047431da8fd32fbbb619c9b2fcf7f03272b6a780580a8b79a2
from contextlib import nullcontext import astropy.units as u import numpy as np from numpy.testing import assert_allclose import pytest from astropy import time from astropy.constants import c from astropy.table import Table from astropy.time import Time from astropy.utils import iers from astropy.coordinates import (SkyCoord, EarthLocation, ICRS, GCRS, Galactic, CartesianDifferential, get_body_barycentric_posvel, FK5, CartesianRepresentation, SpectralQuantity) from astropy.tests.helper import assert_quantity_allclose, quantity_allclose from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning from astropy.utils.data import get_pkg_data_filename from astropy.coordinates.spectral_coordinate import SpectralCoord, _apply_relativistic_doppler_shift from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES def assert_frame_allclose(frame1, frame2, pos_rtol=1e-7, pos_atol=1 * u.m, vel_rtol=1e-7, vel_atol=1 * u.mm / u.s): # checks that: # - the positions are equal to within some tolerance (the relative tolerance # should be dimensionless, the absolute tolerance should be a distance). # note that these are the tolerances *in 3d* # - either both or nether frame has velocities, or if one has no velocities # the other one can have zero velocities # - if velocities are present, they are equal to some tolerance # Ideally this should accept both frames and SkyCoords if hasattr(frame1, 'frame'): # SkyCoord-like frame1 = frame1.frame if hasattr(frame2, 'frame'): # SkyCoord-like frame2 = frame2.frame # assert (frame1.data.differentials and frame2.data.differentials or # (not frame1.data.differentials and not frame2.data.differentials)) assert frame1.is_equivalent_frame(frame2) frame2_in_1 = frame2.transform_to(frame1) assert_quantity_allclose(0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol) if frame1.data.differentials: d1 = frame1.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials['s'] d2 = frame2_in_1.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials['s'] assert_quantity_allclose(d1.norm(d1), d1.norm(d2), rtol=vel_rtol, atol=vel_atol) @pytest.fixture(scope="module") def greenwich_earthlocation(request): if ( not hasattr(EarthLocation, '_site_registry') and request.config.getoption("remote_data") == "none" ): EarthLocation._get_site_registry(force_builtin=True) return EarthLocation.of_site("Greenwich") # GENERAL TESTS # We first run through a series of cases to test different ways of initializing # the observer and target for SpectralCoord, including for example frames, # SkyCoords, and making sure that SpectralCoord is not sensitive to the actual # frame or representation class. 
# Local Standard of Rest LSRD = Galactic(u=0.1 * u.km, v=0.1 * u.km, w=0.1 * u.km, U=9 * u.km / u.s, V=12 * u.km / u.s, W=7 * u.km / u.s, representation_type='cartesian', differential_type='cartesian') LSRD_EQUIV = [ LSRD, SkyCoord(LSRD), # as a SkyCoord LSRD.transform_to(ICRS()), # different frame LSRD.transform_to(ICRS()).transform_to(Galactic()) # different representation ] @pytest.fixture(params=[None] + LSRD_EQUIV) def observer(request): return request.param # Target located in direction of motion of LSRD with no velocities LSRD_DIR_STATIONARY = Galactic(u=9 * u.km, v=12 * u.km, w=7 * u.km, representation_type='cartesian') LSRD_DIR_STATIONARY_EQUIV = [ LSRD_DIR_STATIONARY, SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()) # different representation ] @pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV) def target(request): return request.param def test_create_spectral_coord_observer_target(observer, target): with nullcontext() if target is None else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target) if observer is None: assert coord.observer is None else: assert_frame_allclose(observer, coord.observer) if target is None: assert coord.target is None else: assert_frame_allclose(target, coord.target) assert coord.doppler_rest is None assert coord.doppler_convention is None if observer is None or target is None: assert quantity_allclose(coord.redshift, 0) assert quantity_allclose(coord.radial_velocity, 0 * u.km/u.s) elif (any(observer is lsrd for lsrd in LSRD_EQUIV) and any(target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV)): assert_quantity_allclose(coord.radial_velocity, -274 ** 0.5 * u.km / u.s, atol=1e-4 * u.km / u.s) assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9) else: raise NotImplementedError() def test_create_from_spectral_coord(observer, target): """ Checks that parameters are correctly copied to the new SpectralCoord object """ with nullcontext() if target is None else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): spec_coord1 = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target, doppler_convention='optical', doppler_rest=6000*u.AA) spec_coord2 = SpectralCoord(spec_coord1) assert spec_coord1.observer == spec_coord2.observer assert spec_coord1.target == spec_coord2.target assert spec_coord1.radial_velocity == spec_coord2.radial_velocity assert spec_coord1.doppler_convention == spec_coord2.doppler_convention assert spec_coord1.doppler_rest == spec_coord2.doppler_rest # INTERNAL FUNCTIONS TESTS def test_apply_relativistic_doppler_shift(): # Frequency sq1 = SpectralQuantity(1 * u.GHz) sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c) assert_quantity_allclose(sq2, np.sqrt(1. / 3.) * u.GHz) # Wavelength sq3 = SpectralQuantity(500 * u.nm) sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c) assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm) # Energy sq5 = SpectralQuantity(300 * u.eV) sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c) assert_quantity_allclose(sq6, np.sqrt(1. / 3.) * 300 * u.eV) # Wavenumber sq7 = SpectralQuantity(0.01 / u.micron) sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c) assert_quantity_allclose(sq8, np.sqrt(1. / 3.) 
* 0.01 / u.micron) # Velocity (doppler_convention='relativistic') sq9 = SpectralQuantity(200 * u.km / u.s, doppler_convention='relativistic', doppler_rest=1 * u.GHz) sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s) assert_quantity_allclose(sq10, 499.999666 * u.km / u.s) assert sq10.doppler_convention == 'relativistic' # Velocity (doppler_convention='optical') sq11 = SpectralQuantity(200 * u.km / u.s, doppler_convention='radio', doppler_rest=1 * u.GHz) sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s) assert_quantity_allclose(sq12, 499.650008 * u.km / u.s) assert sq12.doppler_convention == 'radio' # Velocity (doppler_convention='radio') sq13 = SpectralQuantity(200 * u.km / u.s, doppler_convention='optical', doppler_rest=1 * u.GHz) sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s) assert_quantity_allclose(sq14, 500.350493 * u.km / u.s) assert sq14.doppler_convention == 'optical' # Velocity - check relativistic velocity addition sq13 = SpectralQuantity(0 * u.km / u.s, doppler_convention='relativistic', doppler_rest=1 * u.GHz) sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c) assert_quantity_allclose(sq14, 0.999 * c) sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c) assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c) assert sq14.doppler_convention == 'relativistic' # Cases that should raise errors sq15 = SpectralQuantity(200 * u.km / u.s) with pytest.raises(ValueError, match='doppler_convention not set'): _apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s) sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz) with pytest.raises(ValueError, match='doppler_convention not set'): _apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s) sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention='optical') with pytest.raises(ValueError, match='doppler_rest not set'): _apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s) # BASIC TESTS def test_init_quantity(): sc = SpectralCoord(10 * u.GHz) assert sc.value == 10. assert sc.unit is u.GHz assert sc.doppler_convention is None assert sc.doppler_rest is None assert sc.observer is None assert sc.target is None def test_init_spectral_quantity(): sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention='optical')) assert sc.value == 10. 
assert sc.unit is u.GHz assert sc.doppler_convention == 'optical' assert sc.doppler_rest is None assert sc.observer is None assert sc.target is None def test_init_too_many_args(): with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both'): SpectralCoord(10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit='deg'), radial_velocity=1 * u.km / u.s) with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both'): SpectralCoord(10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit='deg'), redshift=1) with pytest.raises(ValueError, match='Cannot set both a radial velocity and redshift'): SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1) def test_init_wrong_type(): with pytest.raises(TypeError, match='observer must be a SkyCoord or coordinate frame instance'): SpectralCoord(10 * u.GHz, observer=3.4) with pytest.raises(TypeError, match='target must be a SkyCoord or coordinate frame instance'): SpectralCoord(10 * u.GHz, target=3.4) with pytest.raises(u.UnitsError, match="Argument 'radial_velocity' to function " "'__new__' must be in units convertible to 'km / s'"): SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg) with pytest.raises(TypeError, match="Argument 'radial_velocity' to function " "'__new__' has no 'unit' attribute. You should " "pass in an astropy Quantity instead."): SpectralCoord(10 * u.GHz, radial_velocity='banana') with pytest.raises(u.UnitsError, match='redshift should be dimensionless'): SpectralCoord(10 * u.GHz, redshift=1 * u.m) with pytest.raises(TypeError, match='Cannot parse "banana" as a Quantity. It does not start with a number.'): SpectralCoord(10 * u.GHz, redshift='banana') def test_observer_init_rv_behavior(): """ Test basic initialization behavior or observer/target and redshift/rv """ # Start off by specifying the radial velocity only sc_init = SpectralCoord([4000, 5000]*u.AA, radial_velocity=100*u.km/u.s) assert sc_init.observer is None assert sc_init.target is None assert_quantity_allclose(sc_init.radial_velocity, 100*u.km/u.s) # Next, set the observer, and check that the radial velocity hasn't changed with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_init.observer = ICRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km])) assert sc_init.observer is not None assert_quantity_allclose(sc_init.radial_velocity, 100*u.km/u.s) # Setting the target should now cause the original radial velocity to be # dropped in favor of the automatically computed one sc_init.target = SkyCoord(CartesianRepresentation([1*u.km, 0*u.km, 0*u.km]), frame='icrs', radial_velocity=30 * u.km / u.s) assert sc_init.target is not None assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s) # The observer can only be set if originally None - now that it isn't # setting it again should fail with pytest.raises(ValueError, match='observer has already been set'): sc_init.observer = GCRS(CartesianRepresentation([0*u.km, 1*u.km, 0*u.km])) # And similarly, changing the target should not be possible with pytest.raises(ValueError, match='target has already been set'): sc_init.target = GCRS(CartesianRepresentation([0*u.km, 1*u.km, 0*u.km])) def test_rv_redshift_initialization(): # Check that setting the redshift sets the radial velocity appropriately, # and that the redshift can be recovered sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=1) assert isinstance(sc_init.redshift, u.Quantity) assert_quantity_allclose(sc_init.redshift, 1*u.dimensionless_unscaled) 
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c) # Check that setting the same radial velocity produces the same redshift # and that the radial velocity can be recovered sc_init2 = SpectralCoord([4000, 5000]*u.AA, radial_velocity=0.6 * c) assert_quantity_allclose(sc_init2.redshift, 1*u.dimensionless_unscaled) assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c) # Check that specifying redshift as a quantity works sc_init3 = SpectralCoord([4000, 5000]*u.AA, redshift=1 * u.one) assert sc_init.redshift == sc_init3.redshift # Make sure that both redshift and radial velocity can't be specified at # the same time. with pytest.raises(ValueError, match='Cannot set both a radial velocity and redshift'): SpectralCoord([4000, 5000]*u.AA, radial_velocity=10*u.km/u.s, redshift=2) def test_replicate(): # The replicate method makes a new object with attributes updated, but doesn't # do any conversion sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2) sc_set_rv = sc_init.replicate(redshift=1) assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c) assert_quantity_allclose(sc_init, [4000, 5000] * u.AA) sc_set_rv = sc_init.replicate(radial_velocity=c / 2) assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1) assert_quantity_allclose(sc_init, [4000, 5000] * u.AA) gcrs_origin = GCRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km])) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_init2 = SpectralCoord([4000, 5000]*u.AA, redshift=1, observer=gcrs_origin) with np.errstate(all='ignore'): sc_init2.replicate(redshift=.5) assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_init3 = SpectralCoord([4000, 5000]*u.AA, redshift=1, target=gcrs_origin) with np.errstate(all='ignore'): sc_init3.replicate(redshift=.5) assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_init4 = SpectralCoord([4000, 5000]*u.AA, observer=gcrs_origin, target=gcrs_origin) with pytest.raises(ValueError, match='Cannot specify radial velocity or redshift if both target and observer are specified'): sc_init4.replicate(redshift=.5) sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2) sc_init_copy = sc_init.replicate(copy=True) sc_init[0] = 6000 * u.AA assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA) sc_init = SpectralCoord([4000, 5000]*u.AA, redshift=2) sc_init_ref = sc_init.replicate() sc_init[0] = 6000 * u.AA assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA) def test_with_observer_stationary_relative_to(): # Simple tests of with_observer_stationary_relative_to to cover different # ways of calling it # The replicate method makes a new object with attributes updated, but doesn't # do any conversion sc1 = SpectralCoord([4000, 5000]*u.AA) with pytest.raises(ValueError, match='This method can only be used if both ' 'observer and target are defined on the ' 'SpectralCoord'): sc1.with_observer_stationary_relative_to('icrs') sc2 = SpectralCoord([4000, 5000] * u.AA, observer=ICRS(0 * u.km, 0 * u.km, 0 * u.km, -1 * u.km / u.s, 0 * u.km / u.s, -1 * u.km / u.s, representation_type='cartesian', differential_type='cartesian'), target=ICRS(0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s)) # Motion of observer is in opposite direction to target assert_quantity_allclose(sc2.radial_velocity, (2 + 2 ** 0.5) * u.km / u.s) # Change to observer that is stationary in ICRS sc3 = 
sc2.with_observer_stationary_relative_to('icrs') # Velocity difference is now pure radial velocity of target assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s) # Check setting the velocity in with_observer_stationary_relative_to sc4 = sc2.with_observer_stationary_relative_to('icrs', velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) # Observer once again moving away from target but faster assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s) # Check that we can also pass frame classes instead of names sc5 = sc2.with_observer_stationary_relative_to(ICRS, velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s) # And make sure we can also pass instances of classes without data sc6 = sc2.with_observer_stationary_relative_to(ICRS(), velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s) # And with data provided no velocities are present sc7 = sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type='cartesian'), velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s) # And also have the ability to pass frames with velocities already defined sc8 = sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km, 2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s, representation_type='cartesian', differential_type='cartesian')) assert_quantity_allclose(sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s) # Make sure that things work properly if passing a SkyCoord sc9 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type='cartesian')), velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s) sc10 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, 2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s, representation_type='cartesian', differential_type='cartesian'))) assert_quantity_allclose(sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s) # But we shouldn't be able to pass both a frame with velocities, and explicit velocities with pytest.raises(ValueError, match='frame already has differentials, cannot also specify velocity'): sc2.with_observer_stationary_relative_to(ICRS(0 * u.km, 0 * u.km, 0 * u.km, 2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s, representation_type='cartesian', differential_type='cartesian'), velocity=[-2**0.5, 0, -2**0.5] * u.km / u.s) # And velocities should have three elements with pytest.raises(ValueError, match='velocity should be a Quantity vector with 3 elements'): sc2.with_observer_stationary_relative_to(ICRS, velocity=[-2**0.5, 0, -2**0.5, -3] * u.km / u.s) # Make sure things don't change depending on what frame class is used for reference sc11 = sc2.with_observer_stationary_relative_to(SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, 2**0.5 * u.km / u.s, 0 * u.km / u.s, 2**0.5 * u.km / u.s, representation_type='cartesian', differential_type='cartesian')).transform_to(Galactic)) assert_quantity_allclose(sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s) # Check that it is possible to preserve the observer frame sc12 = sc2.with_observer_stationary_relative_to(LSRD) sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True) assert isinstance(sc12.observer, Galactic) assert isinstance(sc13.observer, ICRS) def test_los_shift_radial_velocity(): # Tests 
to make sure that with_radial_velocity_shift correctly calculates # the new radial velocity # First check case where observer and/or target aren't specified sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s) sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s) assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s) sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s) assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc4 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin) sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s) assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s) sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s) assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc7 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s, target=ICRS(10 * u.deg, 20 * u.deg)) sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s) assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s) sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s) assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s) # Check that things still work when both observer and target are specified with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc10 = SpectralCoord(500 * u.nm, observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m), target=ICRS(10 * u.deg, 20 * u.deg, radial_velocity=1 * u.km / u.s, distance=10 * u.kpc)) sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s) assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s) sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s) assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s) # Check that things work if radial_velocity wasn't specified at all sc13 = SpectralCoord(500 * u.nm) sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s) assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s) sc15 = sc1.with_radial_velocity_shift() assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s) # Check that units are verified with pytest.raises(u.UnitsError, match="Argument must have unit physical " "type 'speed' for radial velocty or " "'dimensionless' for redshift."): sc1.with_radial_velocity_shift(target_shift=1 * u.kg) @pytest.mark.xfail def test_relativistic_radial_velocity(): # Test for when both observer and target have relativistic velocities. # This is not yet supported, so the test is xfailed for now. sc = SpectralCoord(500 * u.nm, observer=ICRS(0 * u.km, 0 * u.km, 0 * u.km, -0.5 * c, -0.5 * c, -0.5 * c, representation_type='cartesian', differential_type='cartesian'), target=ICRS(1 * u.kpc, 1 * u.kpc, 1 * u.kpc, 0.5 * c, 0.5 * c, 0.5 * c, representation_type='cartesian', differential_type='cartesian')) assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s) # SCIENCE USE CASE TESTS def test_spectral_coord_jupiter(greenwich_earthlocation): """ Checks radial velocity between Earth and Jupiter """ obstime = time.Time('2018-12-13 9:00') obs = greenwich_earthlocation.get_gcrs(obstime) pos, vel = get_body_barycentric_posvel('jupiter', obstime) jupiter = SkyCoord(pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime) spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter) # The velocity should be less than ~43 + a bit extra, which is the # maximum possible earth-jupiter relative velocity. 
We check the exact # value here (determined from SpectralCoord, so this serves as a test to # check that this value doesn't change - the value is not a ground truth) assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s) def test_spectral_coord_alphacen(greenwich_earthlocation): """ Checks radial velocity between Earth and Alpha Centauri """ obstime = time.Time('2018-12-13 9:00') obs = greenwich_earthlocation.get_gcrs(obstime) # Coordinates were obtained from the following then hard-coded to avoid download # acen = SkyCoord.from_name('alpha cen') acen = SkyCoord(ra=219.90085*u.deg, dec=-60.83562*u.deg, frame='icrs', distance=4.37*u.lightyear, radial_velocity=-18.*u.km/u.s) spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen) # The velocity should be less than ~18 + 30 + a bit extra, which is the # maximum possible relative velocity. We check the exact value here # (determined from SpectralCoord, so this serves as a test to check that # this value doesn't change - the value is not a ground truth) assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s) def test_spectral_coord_m31(greenwich_earthlocation): """ Checks radial velocity between Earth and M31 """ obstime = time.Time('2018-12-13 9:00') obs = greenwich_earthlocation.get_gcrs(obstime) # Coordinates were obtained from the following then hard-coded to avoid download # m31 = SkyCoord.from_name('M31') m31 = SkyCoord(ra=10.6847*u.deg, dec=41.269*u.deg, distance=710*u.kpc, radial_velocity=-300*u.km/u.s) spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31) # The velocity should be less than ~300 + 30 + a bit extra in km/s, which # is the maximum possible relative velocity. We check the exact values # here (determined from SpectralCoord, so this serves as a test to check # that this value doesn't change - the value is not a ground truth) assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s) assert_allclose(spc.redshift, -0.0009327276702120191) def test_shift_to_rest_galaxy(): """ This tests storing a spectral coordinate with a specific redshift, and then doing basic rest-to-observed-and-back transformations """ z = 5 rest_line_wls = [5007, 6563]*u.AA observed_spc = SpectralCoord(rest_line_wls*(z+1), redshift=z) rest_spc = observed_spc.to_rest() # alternatively: # rest_spc = observed_spc.with_observer(observed_spec.target) # although then it would have to be clearly documented, or the `to_rest` # implemented in Spectrum1D? assert_quantity_allclose(rest_spc, rest_line_wls) # No frames are explicitly defined, so to the user, the observer and # target are not set. with pytest.raises(AttributeError): assert_frame_allclose(rest_spc.observer, rest_spc.target) def test_shift_to_rest_star_withobserver(greenwich_earthlocation): rv = -8.3283011*u.km/u.s rest_line_wls = [5007, 6563]*u.AA obstime = time.Time('2018-12-13 9:00') eloc = greenwich_earthlocation obs = eloc.get_gcrs(obstime) acen = SkyCoord(ra=219.90085*u.deg, dec=-60.83562*u.deg, frame='icrs', distance=4.37*u.lightyear) # Note that above the rv is missing from the SkyCoord. # That's intended, as it will instead be set in the `SpectralCoord`. 
But # the SpectralCoord machinery should yield something comparable to test_ # spectral_coord_alphacen with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): observed_spc = SpectralCoord(rest_line_wls*(rv/c + 1), observer=obs, target=acen) rest_spc = observed_spc.to_rest() assert_quantity_allclose(rest_spc, rest_line_wls) barycentric_spc = observed_spc.with_observer_stationary_relative_to('icrs') baryrest_spc = barycentric_spc.to_rest() assert quantity_allclose(baryrest_spc, rest_line_wls) # now make sure the change the barycentric shift did is comparable to the # offset rv_correction produces # barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons barytarg = SkyCoord(barycentric_spc.target.data.without_differentials(), frame=barycentric_spc.target.realize_frame(None)) vcorr = barytarg.radial_velocity_correction(kind='barycentric', obstime=obstime, location=eloc) drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity # note this probably will not work on the first try, but it's ok if this is # "good enough", where good enough is estimated below. But that could be # adjusted if we think that's too aggressive of a precision target for what # the machinery can handle # with pytest.raises(AssertionError): assert_quantity_allclose(vcorr, drv, atol=10*u.m/u.s) gcrs_origin = GCRS(CartesianRepresentation([0*u.km, 0*u.km, 0*u.km])) gcrs_not_origin = GCRS(CartesianRepresentation([1*u.km, 0*u.km, 0*u.km])) @pytest.mark.parametrize("sc_kwargs", [ dict(radial_velocity=0*u.km/u.s), dict(observer=gcrs_origin, radial_velocity=0*u.km/u.s), dict(target=gcrs_origin, radial_velocity=0*u.km/u.s), dict(observer=gcrs_origin, target=gcrs_not_origin)]) def test_los_shift(sc_kwargs): wl = [4000, 5000]*u.AA with nullcontext() if 'observer' not in sc_kwargs and 'target' not in sc_kwargs else pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_init = SpectralCoord(wl, **sc_kwargs) # these should always work in *all* cases because it's unambiguous that # a target shift should behave this way new_sc1 = sc_init.with_radial_velocity_shift(.1) assert_quantity_allclose(new_sc1, wl*1.1) new_sc2 = sc_init.with_radial_velocity_shift(.1*u.dimensionless_unscaled) # interpret at redshift assert_quantity_allclose(new_sc1, new_sc2) new_sc3 = sc_init.with_radial_velocity_shift(-100*u.km/u.s) assert_quantity_allclose(new_sc3, wl*(1 + (-100*u.km/u.s / c))) # now try the cases where observer is specified as well/instead if sc_init.observer is None or sc_init.target is None: with pytest.raises(ValueError): # both must be specified if you're going to mess with observer sc_init.with_radial_velocity_shift(observer_shift=.1) if sc_init.observer is not None and sc_init.target is not None: # redshifting the observer should *blushift* the LOS velocity since # its the observer-to-target vector that matters new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=.1) assert_quantity_allclose(new_sc4, wl/1.1) # an equal shift in both should produce no offset at all new_sc5 = sc_init.with_radial_velocity_shift(target_shift=.1, observer_shift=.1) assert_quantity_allclose(new_sc5, wl) def test_asteroid_velocity_frame_shifts(): """ This test mocks up the use case of observing a spectrum of an asteroid at different times and from different observer locations. 
""" time1 = time.Time('2018-12-13 9:00') dt = 12*u.hour time2 = time1 + dt # make the silly but simplifying assumption that the astroid is moving along # the x-axis of GCRS, and makes a 10 earth-radius closest approach v_ast = [5, 0, 0]*u.km/u.s x1 = -v_ast[0]*dt / 2 x2 = v_ast[0]*dt / 2 z = 10*u.Rearth cdiff = CartesianDifferential(v_ast) asteroid_loc1 = GCRS(CartesianRepresentation(x1.to(u.km), 0*u.km, z.to(u.km), differentials=cdiff), obstime=time1) asteroid_loc2 = GCRS(CartesianRepresentation(x2.to(u.km), 0*u.km, z.to(u.km), differentials=cdiff), obstime=time2) # assume satellites that are essentially fixed in geostationary orbit on # opposite sides of the earth observer1 = GCRS(CartesianRepresentation([0*u.km, 35000*u.km, 0*u.km]), obstime=time1) observer2 = GCRS(CartesianRepresentation([0*u.km, -35000*u.km, 0*u.km]), obstime=time2) wls = np.linspace(4000, 7000, 100) * u.AA with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1) assert spec_coord1.radial_velocity < 0*u.km/u.s assert spec_coord1.radial_velocity > -5*u.km/u.s with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2) assert spec_coord2.radial_velocity > 0*u.km/u.s assert spec_coord2.radial_velocity < 5*u.km/u.s # now check the behavior of with_observer_stationary_relative_to: we shift each coord # into the velocity frame of its *own* target. That would then be a # spectralcoord that would allow direct physical comparison of the two # different spec_corrds. There's no way to test that, without # actual data, though. # spec_coord2 is redshifted, so we test that it behaves the way "shifting # to rest frame" should - the as-observed spectral coordinate should become # the rest frame, so something that starts out red should become bluer target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target) assert np.all(target_sc2 < spec_coord2) # rv/redshift should be 0 since the observer and target velocities should # be the same assert_quantity_allclose(target_sc2.radial_velocity, 0*u.km/u.s, atol=1e-7 * u.km / u.s) # check that the same holds for spec_coord1, but be more specific: it # should follow the standard redshift formula (which in this case yields # a blueshift, although the formula is the same as 1+z) target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target) assert_quantity_allclose(target_sc1, spec_coord1/(1+spec_coord1.redshift)) # TODO: Figure out what is meant by the below use case # ensure the "target-rest" use gives the same answer # target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest') # assert_quantity_allclose(target_sc1, target_sc1_alt) def test_spectral_coord_from_sky_coord_without_distance(): # see https://github.com/astropy/specutils/issues/658 for issue context obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type='cartesian') with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs) # coord.target = SkyCoord.from_name('m31') # <- original issue, but below is the same but requires no remote data access with pytest.warns(AstropyUserWarning, match='Distance on coordinate object is dimensionless'): coord.target = SkyCoord(ra=10.68470833*u.deg, dec=41.26875*u.deg) EXPECTED_VELOCITY_FRAMES = {'geocent': 'gcrs', 'heliocent': 'hcrs', 'lsrk': 'lsrk', 'lsrd': 'lsrd', 
'galactoc': FITSWCS_VELOCITY_FRAMES['GALACTOC'], 'localgrp': FITSWCS_VELOCITY_FRAMES['LOCALGRP']} @pytest.mark.parametrize('specsys', list(EXPECTED_VELOCITY_FRAMES)) @pytest.mark.slow def test_spectralcoord_accuracy(specsys): # This is a test to check the numerical results of transformations between # different velocity frames in SpectralCoord. This compares the velocity # shifts determined with SpectralCoord to those determined from the rv # package in Starlink. velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys] reference_filename = get_pkg_data_filename('accuracy/data/rv.ecsv') reference_table = Table.read(reference_filename, format='ascii.ecsv') rest = 550 * u.nm with iers.conf.set_temp('auto_download', False): for row in reference_table: observer = EarthLocation.from_geodetic(-row['obslon'], row['obslat']).get_itrs(obstime=row['obstime']) with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'): sc_topo = SpectralCoord(545 * u.nm, observer=observer, target=row['target']) # FIXME: A warning is emitted for dates after MJD=57754.0 even # though the leap second table should be valid until the end of # 2020. with nullcontext() if row['obstime'].mjd < 57754 else pytest.warns(AstropyWarning, match='Tried to get polar motions'): sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame) delta_vel = (sc_topo.to(u.km / u.s, doppler_convention='relativistic', doppler_rest=rest) - sc_final.to(u.km / u.s, doppler_convention='relativistic', doppler_rest=rest)) if specsys == 'galactoc': assert_allclose(delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30) else: assert_allclose(delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=0.02, rtol=0.002) # TODO: add test when target is not ICRS # TODO: add test when SpectralCoord is in velocity to start with
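
# ---------------------------------------------------------------------------
# Worked check (illustrative only, not part of the test module above): where
# the sqrt(1/3) and sqrt(3) factors asserted in
# test_apply_relativistic_doppler_shift come from.  For a purely line-of-sight
# recession velocity v, the relativistic Doppler factor for frequency is
# sqrt((1 - beta) / (1 + beta)) with beta = v / c; wavelengths scale by the
# reciprocal.  At beta = 0.5 this gives sqrt(0.5 / 1.5) = sqrt(1/3) for
# frequency and sqrt(3) for wavelength.
import numpy as np
from astropy.constants import c

beta = float(0.5 * c / c)                        # = 0.5, dimensionless
freq_factor = np.sqrt((1 - beta) / (1 + beta))   # ~0.5774 == sqrt(1/3)
wave_factor = 1 / freq_factor                    # ~1.7321 == sqrt(3)

assert np.isclose(freq_factor, np.sqrt(1 / 3))          # 1 GHz -> sqrt(1/3) GHz
assert np.isclose(wave_factor * 500, np.sqrt(3) * 500)  # 500 nm -> sqrt(3) * 500 nm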
4c9d24af65310ea39ee6cbb12618b6519c29fa838accf6c4ba19149925627226
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz. """ import os import warnings from importlib import metadata import pytest import numpy as np import erfa from astropy import units as u from astropy.tests.helper import assert_quantity_allclose as assert_allclose from astropy.time import Time from astropy.coordinates import ( EarthLocation, get_sun, ICRS, GCRS, CIRS, ITRS, AltAz, HADec, PrecessedGeocentric, CartesianRepresentation, SkyCoord, CartesianDifferential, SphericalRepresentation, UnitSphericalRepresentation, HCRS, HeliocentricMeanEcliptic, TEME, TETE) from astropy.coordinates.solar_system import _apparent_position_in_true_coordinates, get_body from astropy.utils import iers from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning from astropy.utils.compat.optional_deps import HAS_JPLEPHEM from astropy.coordinates.angle_utilities import golden_spiral_grid from astropy.coordinates.builtin_frames.intermediate_rotation_transforms import ( get_location_gcrs, tete_to_itrs_mat, gcrs_to_cirs_mat, cirs_to_itrs_mat) from astropy.coordinates.builtin_frames.utils import get_jd12 from astropy.coordinates import solar_system_ephemeris from astropy.units import allclose CI = os.environ.get('CI', False) == "true" def test_icrs_cirs(): """ Check a few cases of ICRS<->CIRS for consistency. Also includes the CIRS<->CIRS transforms at different times, as those go through ICRS """ usph = golden_spiral_grid(200) dist = np.linspace(0., 1, len(usph)) * u.pc inod = ICRS(usph) iwd = ICRS(ra=usph.lon, dec=usph.lat, distance=dist) cframe1 = CIRS() cirsnod = inod.transform_to(cframe1) # uses the default time # first do a round-tripping test inod2 = cirsnod.transform_to(ICRS()) assert_allclose(inod.ra, inod2.ra) assert_allclose(inod.dec, inod2.dec) # now check that a different time yields different answers cframe2 = CIRS(obstime=Time('J2005')) cirsnod2 = inod.transform_to(cframe2) assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8) assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8) # parallax effects should be included, so with and w/o distance should be different cirswd = iwd.transform_to(cframe1) assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8) assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8) # and the distance should transform at least somehow assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8) # now check that the cirs self-transform works as expected cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op assert_allclose(cirsnod.ra, cirsnod3.ra) assert_allclose(cirsnod.dec, cirsnod3.dec) cirsnod4 = cirsnod.transform_to(cframe2) # should be different assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8) assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8) cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same assert_allclose(cirsnod.ra, cirsnod5.ra) assert_allclose(cirsnod.dec, cirsnod5.dec) usph = golden_spiral_grid(200) dist = np.linspace(0.5, 1, len(usph)) * u.pc icrs_coords = [ICRS(usph), ICRS(usph.lon, usph.lat, distance=dist)] gcrs_frames = [GCRS(), GCRS(obstime=Time('J2005'))] @pytest.mark.parametrize('icoo', icrs_coords) def test_icrs_gcrs(icoo): """ Check ICRS<->GCRS for consistency """ gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time # first do a round-tripping test icoo2 = gcrscoo.transform_to(ICRS()) assert_allclose(icoo.distance, icoo2.distance) assert_allclose(icoo.ra, icoo2.ra) 
assert_allclose(icoo.dec, icoo2.dec) assert isinstance(icoo2.data, icoo.data.__class__) # now check that a different time yields different answers gcrscoo2 = icoo.transform_to(gcrs_frames[1]) assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10*u.deg) # now check that the cirs self-transform works as expected gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op assert_allclose(gcrscoo.ra, gcrscoo3.ra) assert_allclose(gcrscoo.dec, gcrscoo3.dec) gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10*u.deg) gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10*u.deg) assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10*u.deg) # also make sure that a GCRS with a different geoloc/geovel gets a different answer # roughly a moon-like frame gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s) gcrscoo6 = icoo.transform_to(gframe3) # should be different assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10*u.deg) icooviag3 = gcrscoo6.transform_to(ICRS()) # and now back to the original assert_allclose(icoo.ra, icooviag3.ra) assert_allclose(icoo.dec, icooviag3.dec) @pytest.mark.parametrize('gframe', gcrs_frames) def test_icrs_gcrs_dist_diff(gframe): """ Check that with and without distance give different ICRS<->GCRS answers """ gcrsnod = icrs_coords[0].transform_to(gframe) gcrswd = icrs_coords[1].transform_to(gframe) # parallax effects should be included, so with and w/o distance should be different assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg) assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg) # and the distance should transform at least somehow assert not allclose(gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10*u.pc) def test_cirs_to_altaz(): """ Check the basic CIRS<->AltAz transforms. More thorough checks implicitly happen in `test_iau_fullstack` """ from astropy.coordinates import EarthLocation usph = golden_spiral_grid(200) dist = np.linspace(0.5, 1, len(usph)) * u.pc cirs = CIRS(usph, obstime='J2000') crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist) cirscart = CIRS(crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation) loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) altazframe = AltAz(location=loc, obstime=Time('J2005')) cirs2 = cirs.transform_to(altazframe).transform_to(cirs) cirs3 = cirscart.transform_to(altazframe).transform_to(cirs) # check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert_allclose(cirs.ra, cirs3.ra) assert_allclose(cirs.dec, cirs3.dec) def test_cirs_to_hadec(): """ Check the basic CIRS<->HADec transforms. 
""" from astropy.coordinates import EarthLocation usph = golden_spiral_grid(200) dist = np.linspace(0.5, 1, len(usph)) * u.pc cirs = CIRS(usph, obstime='J2000') crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist) cirscart = CIRS(crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation) loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) hadecframe = HADec(location=loc, obstime=Time('J2005')) cirs2 = cirs.transform_to(hadecframe).transform_to(cirs) cirs3 = cirscart.transform_to(hadecframe).transform_to(cirs) # check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert_allclose(cirs.ra, cirs3.ra) assert_allclose(cirs.dec, cirs3.dec) def test_itrs_topo_to_altaz_with_refraction(): loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) usph = golden_spiral_grid(200) dist = np.linspace(1., 1000.0, len(usph)) * u.au icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist) altaz_frame1 = AltAz(obstime = 'J2000', location=loc) altaz_frame2 = AltAz(obstime = 'J2000', location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5) cirs_frame = CIRS(obstime = 'J2000', location=loc) itrs_frame = ITRS(location=loc) # Normal route # No Refraction altaz1 = icrs.transform_to(altaz_frame1) # Refraction added altaz2 = icrs.transform_to(altaz_frame2) # Refraction removed cirs = altaz2.transform_to(cirs_frame) altaz3 = cirs.transform_to(altaz_frame1) # Through ITRS # No Refraction itrs = icrs.transform_to(itrs_frame) altaz11 = itrs.transform_to(altaz_frame1) assert_allclose(altaz11.az - altaz1.az, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz11.alt - altaz1.alt, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz11.distance - altaz1.distance, 0*u.cm, atol=10.0*u.cm) # Round trip itrs11 = altaz11.transform_to(itrs_frame) assert_allclose(itrs11.x, itrs.x) assert_allclose(itrs11.y, itrs.y) assert_allclose(itrs11.z, itrs.z) # Refraction added altaz22 = itrs.transform_to(altaz_frame2) assert_allclose(altaz22.az - altaz2.az, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz22.alt - altaz2.alt, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz22.distance - altaz2.distance, 0*u.cm, atol=10.0*u.cm) # Refraction removed itrs = altaz22.transform_to(itrs_frame) altaz33 = itrs.transform_to(altaz_frame1) assert_allclose(altaz33.az - altaz3.az, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz33.alt - altaz3.alt, 0*u.mas, atol=0.1*u.mas) assert_allclose(altaz33.distance - altaz3.distance, 0*u.cm, atol=10.0*u.cm) def test_itrs_topo_to_hadec_with_refraction(): loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) usph = golden_spiral_grid(200) dist = np.linspace(1., 1000.0, len(usph)) * u.au icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist) hadec_frame1 = HADec(obstime = 'J2000', location=loc) hadec_frame2 = HADec(obstime = 'J2000', location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5) cirs_frame = CIRS(obstime = 'J2000', location=loc) itrs_frame = ITRS(location=loc) # Normal route # No Refraction hadec1 = icrs.transform_to(hadec_frame1) # Refraction added hadec2 = icrs.transform_to(hadec_frame2) # Refraction removed cirs = hadec2.transform_to(cirs_frame) hadec3 = cirs.transform_to(hadec_frame1) # Through ITRS # No Refraction itrs = icrs.transform_to(itrs_frame) hadec11 = itrs.transform_to(hadec_frame1) assert_allclose(hadec11.ha - hadec1.ha, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec11.dec - hadec1.dec, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec11.distance - hadec1.distance, 0*u.cm, atol=10.0*u.cm) # Round trip itrs11 = 
hadec11.transform_to(itrs_frame) assert_allclose(itrs11.x, itrs.x) assert_allclose(itrs11.y, itrs.y) assert_allclose(itrs11.z, itrs.z) # Refraction added hadec22 = itrs.transform_to(hadec_frame2) assert_allclose(hadec22.ha - hadec2.ha, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec22.dec - hadec2.dec, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec22.distance - hadec2.distance, 0*u.cm, atol=10.0*u.cm) # Refraction removed itrs = hadec22.transform_to(itrs_frame) hadec33 = itrs.transform_to(hadec_frame1) assert_allclose(hadec33.ha - hadec3.ha, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec33.dec - hadec3.dec, 0*u.mas, atol=0.1*u.mas) assert_allclose(hadec33.distance - hadec3.distance, 0*u.cm, atol=10.0*u.cm) def test_gcrs_itrs(): """ Check basic GCRS<->ITRS transforms for round-tripping. """ usph = golden_spiral_grid(200) gcrs = GCRS(usph, obstime='J2000') gcrs6 = GCRS(usph, obstime='J2006') gcrs2 = gcrs.transform_to(ITRS()).transform_to(gcrs) gcrs6_2 = gcrs6.transform_to(ITRS()).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs2.ra) assert_allclose(gcrs.dec, gcrs2.dec) # these should be different: assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8) assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8) # also try with the cartesian representation gcrsc = gcrs.realize_frame(gcrs.data) gcrsc.representation_type = CartesianRepresentation gcrsc2 = gcrsc.transform_to(ITRS()).transform_to(gcrsc) assert_allclose(gcrsc.spherical.lon, gcrsc2.ra) assert_allclose(gcrsc.spherical.lat, gcrsc2.dec) def test_cirs_itrs(): """ Check basic CIRS<->ITRS geocentric transforms for round-tripping. """ usph = golden_spiral_grid(200) cirs = CIRS(usph, obstime='J2000') cirs6 = CIRS(usph, obstime='J2006') cirs2 = cirs.transform_to(ITRS()).transform_to(cirs) cirs6_2 = cirs6.transform_to(ITRS()).transform_to(cirs) # different obstime # just check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert not allclose(cirs.ra, cirs6_2.ra) assert not allclose(cirs.dec, cirs6_2.dec) def test_cirs_itrs_topo(): """ Check basic CIRS<->ITRS topocentric transforms for round-tripping. """ loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) usph = golden_spiral_grid(200) cirs = CIRS(usph, obstime='J2000', location=loc) cirs6 = CIRS(usph, obstime='J2006', location=loc) cirs2 = cirs.transform_to(ITRS(location=loc)).transform_to(cirs) cirs6_2 = cirs6.transform_to(ITRS(location=loc)).transform_to(cirs) # different obstime # just check round-tripping assert_allclose(cirs.ra, cirs2.ra) assert_allclose(cirs.dec, cirs2.dec) assert not allclose(cirs.ra, cirs6_2.ra) assert not allclose(cirs.dec, cirs6_2.dec) def test_gcrs_cirs(): """ Check GCRS<->CIRS transforms for round-tripping. 
More complicated than the above two because it's multi-hop """ usph = golden_spiral_grid(200) gcrs = GCRS(usph, obstime='J2000') gcrs6 = GCRS(usph, obstime='J2006') gcrs2 = gcrs.transform_to(CIRS()).transform_to(gcrs) gcrs6_2 = gcrs6.transform_to(CIRS()).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs2.ra) assert_allclose(gcrs.dec, gcrs2.dec) # these should be different: assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8) assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8) # now try explicit intermediate pathways and ensure they're all consistent gcrs3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(ITRS()).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs3.ra) assert_allclose(gcrs.dec, gcrs3.dec) gcrs4 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(ICRS()).transform_to(gcrs) assert_allclose(gcrs.ra, gcrs4.ra) assert_allclose(gcrs.dec, gcrs4.dec) def test_gcrs_altaz(): """ Check GCRS<->AltAz transforms for round-tripping. Has multiple paths """ from astropy.coordinates import EarthLocation usph = golden_spiral_grid(128) gcrs = GCRS(usph, obstime='J2000')[None] # broadcast with times below # check array times sure N-d arrays work times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format='jd')[:, None] loc = EarthLocation(lon=10 * u.deg, lat=80. * u.deg) aaframe = AltAz(obstime=times, location=loc) aa1 = gcrs.transform_to(aaframe) aa2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(aaframe) aa3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(aaframe) # make sure they're all consistent assert_allclose(aa1.alt, aa2.alt) assert_allclose(aa1.az, aa2.az) assert_allclose(aa1.alt, aa3.alt) assert_allclose(aa1.az, aa3.az) def test_gcrs_hadec(): """ Check GCRS<->HADec transforms for round-tripping. Has multiple paths """ from astropy.coordinates import EarthLocation usph = golden_spiral_grid(128) gcrs = GCRS(usph, obstime='J2000') # broadcast with times below # check array times sure N-d arrays work times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format='jd')[:, np.newaxis] loc = EarthLocation(lon=10 * u.deg, lat=80. 
* u.deg) hdframe = HADec(obstime=times, location=loc) hd1 = gcrs.transform_to(hdframe) hd2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(hdframe) hd3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(hdframe) # make sure they're all consistent assert_allclose(hd1.dec, hd2.dec) assert_allclose(hd1.ha, hd2.ha) assert_allclose(hd1.dec, hd3.dec) assert_allclose(hd1.ha, hd3.ha) def test_precessed_geocentric(): assert PrecessedGeocentric().equinox.jd == Time('J2000').jd gcrs_coo = GCRS(180*u.deg, 2*u.deg, distance=10000*u.km) pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric()) assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10*u.marcsec assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10*u.marcsec assert_allclose(gcrs_coo.distance, pgeo_coo.distance) gcrs_roundtrip = pgeo_coo.transform_to(GCRS()) assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra) assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec) assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance) pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox='B1850')) assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5*u.deg assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5*u.deg assert_allclose(gcrs_coo.distance, pgeo_coo2.distance) gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS()) assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra) assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec) assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance) def test_precessed_geocentric_different_obstime(): # Create two PrecessedGeocentric frames with different obstime precessedgeo1 = PrecessedGeocentric(obstime='2021-09-07') precessedgeo2 = PrecessedGeocentric(obstime='2021-06-07') # GCRS->PrecessedGeocentric should give different results for the two frames gcrs_coord = GCRS(10*u.deg, 20*u.deg, 3*u.AU, obstime=precessedgeo1.obstime) pg_coord1 = gcrs_coord.transform_to(precessedgeo1) pg_coord2 = gcrs_coord.transform_to(precessedgeo2) assert not pg_coord1.is_equivalent_frame(pg_coord2) assert not allclose(pg_coord1.cartesian.xyz, pg_coord2.cartesian.xyz) # Looping back to GCRS should return the original coordinate loopback1 = pg_coord1.transform_to(gcrs_coord) loopback2 = pg_coord2.transform_to(gcrs_coord) assert loopback1.is_equivalent_frame(gcrs_coord) assert loopback2.is_equivalent_frame(gcrs_coord) assert_allclose(loopback1.cartesian.xyz, gcrs_coord.cartesian.xyz) assert_allclose(loopback2.cartesian.xyz, gcrs_coord.cartesian.xyz) # shared by parametrized tests below. 
Some use the whole AltAz, others use just obstime totest_frames = [AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), obstime=Time('J2000')), # J2000 is often a default so this might work when others don't AltAz(location=EarthLocation(120*u.deg, -35*u.deg), obstime=Time('J2000')), AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), obstime=Time('2014-01-01 00:00:00')), AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), obstime=Time('2014-08-01 08:00:00')), AltAz(location=EarthLocation(120*u.deg, -35*u.deg), obstime=Time('2014-01-01 00:00:00')) ] MOONDIST = 385000*u.km # approximate moon semi-major orbit axis of moon MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST) EARTHECC = 0.017 + 0.005 # roughly earth orbital eccentricity, but with an added tolerance @pytest.mark.parametrize('testframe', totest_frames) def test_gcrs_altaz_sunish(testframe): """ Sanity-check that the sun is at a reasonable distance from any altaz """ sun = get_sun(testframe.obstime) assert sun.frame.name == 'gcrs' # the .to(u.au) is not necessary, it just makes the asserts on failure more readable assert (EARTHECC - 1)*u.au < sun.distance.to(u.au) < (EARTHECC + 1)*u.au sunaa = sun.transform_to(testframe) assert (EARTHECC - 1)*u.au < sunaa.distance.to(u.au) < (EARTHECC + 1)*u.au @pytest.mark.parametrize('testframe', totest_frames) def test_gcrs_altaz_moonish(testframe): """ Sanity-check that an object resembling the moon goes to the right place with a GCRS->AltAz transformation """ moon = GCRS(MOONDIST_CART, obstime=testframe.obstime) moonaa = moon.transform_to(testframe) # now check that the distance change is similar to earth radius assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.au) < 7000*u.km # now check that it round-trips moon2 = moonaa.transform_to(moon) assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz) # also should add checks that the alt/az are different for different earth locations @pytest.mark.parametrize('testframe', totest_frames) def test_gcrs_altaz_bothroutes(testframe): """ Repeat of both the moonish and sunish tests above to make sure the two routes through the coordinate graph are consistent with each other """ sun = get_sun(testframe.obstime) sunaa_viaicrs = sun.transform_to(ICRS()).transform_to(testframe) sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) moon = GCRS(MOONDIST_CART, obstime=testframe.obstime) moonaa_viaicrs = moon.transform_to(ICRS()).transform_to(testframe) moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz) assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz) @pytest.mark.parametrize('testframe', totest_frames) def test_cirs_altaz_moonish(testframe): """ Sanity-check that an object resembling the moon goes to the right place with a CIRS<->AltAz transformation """ moon = CIRS(MOONDIST_CART, obstime=testframe.obstime) moonaa = moon.transform_to(testframe) assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km # now check that it round-trips moon2 = moonaa.transform_to(moon) assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz) @pytest.mark.parametrize('testframe', totest_frames) def test_cirs_altaz_nodist(testframe): """ Check that a UnitSphericalRepresentation coordinate round-trips for the CIRS<->AltAz transformation. 
""" coo0 = CIRS(UnitSphericalRepresentation(10*u.deg, 20*u.deg), obstime=testframe.obstime) # check that it round-trips coo1 = coo0.transform_to(testframe).transform_to(coo0) assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz) @pytest.mark.parametrize('testframe', totest_frames) def test_cirs_icrs_moonish(testframe): """ check that something like the moon goes to about the right distance from the ICRS origin when starting from CIRS """ moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime) moonicrs = moonish.transform_to(ICRS()) assert 0.97*u.au < moonicrs.distance < 1.03*u.au @pytest.mark.parametrize('testframe', totest_frames) def test_gcrs_icrs_moonish(testframe): """ check that something like the moon goes to about the right distance from the ICRS origin when starting from GCRS """ moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime) moonicrs = moonish.transform_to(ICRS()) assert 0.97*u.au < moonicrs.distance < 1.03*u.au @pytest.mark.parametrize('testframe', totest_frames) def test_icrs_gcrscirs_sunish(testframe): """ check that the ICRS barycenter goes to about the right distance from various ~geocentric frames (other than testframe) """ # slight offset to avoid divide-by-zero errors icrs = ICRS(0*u.deg, 0*u.deg, distance=10*u.km) gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < gcrs.distance.to(u.au) < (EARTHECC + 1)*u.au cirs = icrs.transform_to(CIRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < cirs.distance.to(u.au) < (EARTHECC + 1)*u.au itrs = icrs.transform_to(ITRS(obstime=testframe.obstime)) assert (EARTHECC - 1)*u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1)*u.au @pytest.mark.parametrize('testframe', totest_frames) def test_icrs_altaz_moonish(testframe): """ Check that something expressed in *ICRS* as being moon-like goes to the right AltAz distance """ # we use epv00 instead of get_sun because get_sun includes aberration earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(testframe.obstime, 'tdb')) earth_icrs_xyz = earth_pv_bary[0]*u.au moonoffset = [0, 0, MOONDIST.value]*MOONDIST.unit moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset)) moonaa = moonish_icrs.transform_to(testframe) # now check that the distance change is similar to earth radius assert 1000*u.km < np.abs(moonaa.distance - MOONDIST).to(u.au) < 7000*u.km def test_gcrs_self_transform_closeby(): """ Tests GCRS self transform for objects which are nearby and thus have reasonable parallax. Moon positions were originally created using JPL DE432s ephemeris. The two lunar positions (one geocentric, one at a defined location) are created via a transformation from ICRS to two different GCRS frames. We test that the GCRS-GCRS self transform can correctly map one GCRS frame onto the other. """ t = Time("2014-12-25T07:00") moon_geocentric = SkyCoord(GCRS(318.10579159*u.deg, -11.65281165*u.deg, 365042.64880308*u.km, obstime=t)) # this is the location of the Moon as seen from La Palma obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216]*u.m obsgeovel = [4.59798494, -407.84677071, 0.]*u.m/u.s moon_lapalma = SkyCoord(GCRS(318.7048445*u.deg, -11.98761996*u.deg, 369722.8231031*u.km, obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)) transformed = moon_geocentric.transform_to(moon_lapalma.frame) delta = transformed.separation_3d(moon_lapalma) assert_allclose(delta, 0.0*u.m, atol=1*u.m) def test_teme_itrf(): """ Test case transform from TEME to ITRF. 
Test case derives from example on appendix C of Vallado, Crawford, Hujsak & Kelso (2006). See https://celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf """ v_itrf = CartesianDifferential(-3.225636520, -2.872451450, 5.531924446, unit=u.km/u.s) p_itrf = CartesianRepresentation(-1033.479383, 7901.2952740, 6380.35659580, unit=u.km, differentials={'s': v_itrf}) t = Time("2004-04-06T07:51:28.386") teme = ITRS(p_itrf, obstime=t).transform_to(TEME(obstime=t)) v_teme = CartesianDifferential(-4.746131487, 0.785818041, 5.531931288, unit=u.km/u.s) p_teme = CartesianRepresentation(5094.18016210, 6127.64465050, 6380.34453270, unit=u.km, differentials={'s': v_teme}) assert_allclose(teme.cartesian.without_differentials().xyz, p_teme.without_differentials().xyz, atol=30*u.cm) assert_allclose(teme.cartesian.differentials['s'].d_xyz, p_teme.differentials['s'].d_xyz, atol=1.0*u.cm/u.s) # test round trip itrf = teme.transform_to(ITRS(obstime=t)) assert_allclose( itrf.cartesian.without_differentials().xyz, p_itrf.without_differentials().xyz, atol=100*u.cm ) assert_allclose( itrf.cartesian.differentials['s'].d_xyz, p_itrf.differentials['s'].d_xyz, atol=1*u.cm/u.s ) def test_precessedgeocentric_loopback(): from_coo = PrecessedGeocentric(1*u.deg, 2*u.deg, 3*u.AU, obstime='2001-01-01', equinox='2001-01-01') # Change just the obstime to_frame = PrecessedGeocentric(obstime='2001-06-30', equinox='2001-01-01') explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame) implicit_coo = from_coo.transform_to(to_frame) # Confirm that the explicit transformation changes the coordinate assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10) assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10) assert not allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10) # Confirm that the loopback matches the explicit transformation assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10) assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10) assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10) # Change just the equinox to_frame = PrecessedGeocentric(obstime='2001-01-01', equinox='2001-06-30') explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame) implicit_coo = from_coo.transform_to(to_frame) # Confirm that the explicit transformation changes the direction but not the distance assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10) assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10) assert allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10) # Confirm that the loopback matches the explicit transformation assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10) assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10) assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10) def test_teme_loopback(): from_coo = TEME(1*u.AU, 2*u.AU, 3*u.AU, obstime='2001-01-01') to_frame = TEME(obstime='2001-06-30') explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame) implicit_coo = from_coo.transform_to(to_frame) # Confirm that the explicit transformation changes the coordinate assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10) # Confirm that the loopback matches the explicit transformation assert_allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10) @pytest.mark.remote_data def test_earth_orientation_table(monkeypatch): """Check that we can set the IERS table used as Earth Reference. 
Use the here and now to be sure we get a difference. """ monkeypatch.setattr('astropy.utils.iers.conf.auto_download', True) t = Time.now() location = EarthLocation(lat=0*u.deg, lon=0*u.deg) altaz = AltAz(location=location, obstime=t) sc = SkyCoord(1*u.deg, 2*u.deg) # Default: uses IERS_Auto, which will give a prediction. # Note: tests run with warnings turned into errors, so it is # meaningful if this passes. if CI: with warnings.catch_warnings(): # Server occasionally blocks IERS download in CI. warnings.filterwarnings('ignore', message=r'.*using local IERS-B.*') # This also captures unclosed socket warning that is ignored in setup.cfg warnings.filterwarnings('ignore', message=r'.*unclosed.*') altaz_auto = sc.transform_to(altaz) else: altaz_auto = sc.transform_to(altaz) # No warnings with iers.earth_orientation_table.set(iers.IERS_B.open()): with pytest.warns(AstropyWarning, match='after IERS data'): altaz_b = sc.transform_to(altaz) sep_b_auto = altaz_b.separation(altaz_auto) assert_allclose(sep_b_auto, 0.0*u.deg, atol=1*u.arcsec) assert sep_b_auto > 10*u.microarcsecond # Check we returned to regular IERS system. altaz_auto2 = sc.transform_to(altaz) assert altaz_auto2.separation(altaz_auto) == 0. @pytest.mark.remote_data @pytest.mark.skipif(not HAS_JPLEPHEM, reason='requires jplephem') def test_ephemerides(): """ We test that using different ephemerides gives very similar results for transformations """ t = Time("2014-12-25T07:00") moon = SkyCoord(GCRS(318.10579159*u.deg, -11.65281165*u.deg, 365042.64880308*u.km, obstime=t)) icrs_frame = ICRS() hcrs_frame = HCRS(obstime=t) ecl_frame = HeliocentricMeanEcliptic(equinox=t) cirs_frame = CIRS(obstime=t) moon_icrs_builtin = moon.transform_to(icrs_frame) moon_hcrs_builtin = moon.transform_to(hcrs_frame) moon_helioecl_builtin = moon.transform_to(ecl_frame) moon_cirs_builtin = moon.transform_to(cirs_frame) with solar_system_ephemeris.set('jpl'): moon_icrs_jpl = moon.transform_to(icrs_frame) moon_hcrs_jpl = moon.transform_to(hcrs_frame) moon_helioecl_jpl = moon.transform_to(ecl_frame) moon_cirs_jpl = moon.transform_to(cirs_frame) # most transformations should differ by an amount which is # non-zero but of order milliarcsecs sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl) sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl) sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl) sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl) assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0*u.deg, atol=10*u.mas) assert all(sep > 10*u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)) # CIRS should be the same assert_allclose(sep_cirs, 0.0*u.deg, atol=1*u.microarcsecond) def test_tete_transforms(): """ We test the TETE transforms for proper behaviour here. The TETE transforms are tested for accuracy against JPL Horizons in test_solar_system.py. Here we are looking to check for consistency and errors in the self transform. 
""" loc = EarthLocation.from_geodetic("-22°57'35.1", "-67°47'14.1", 5186*u.m) time = Time('2020-04-06T00:00') p, v = loc.get_gcrs_posvel(time) gcrs_frame = GCRS(obstime=time, obsgeoloc=p, obsgeovel=v) moon = SkyCoord(169.24113968*u.deg, 10.86086666*u.deg, 358549.25381755*u.km, frame=gcrs_frame) tete_frame = TETE(obstime=time, location=loc) # need to set obsgeoloc/vel explicitly or skycoord behaviour over-writes tete_geo = TETE(obstime=time, location=EarthLocation(*([0, 0, 0]*u.km))) # test self-transform by comparing to GCRS-TETE-ITRS-TETE route tete_coo1 = moon.transform_to(tete_frame) tete_coo2 = moon.transform_to(tete_geo) assert_allclose(tete_coo1.separation_3d(tete_coo2), 0*u.mm, atol=1*u.mm) # test TETE-ITRS transform by comparing GCRS-CIRS-ITRS to GCRS-TETE-ITRS itrs1 = moon.transform_to(CIRS()).transform_to(ITRS()) itrs2 = moon.transform_to(TETE()).transform_to(ITRS()) assert_allclose(itrs1.separation_3d(itrs2), 0*u.mm, atol=1*u.mm) # test round trip GCRS->TETE->GCRS new_moon = moon.transform_to(TETE()).transform_to(moon) assert_allclose(new_moon.separation_3d(moon), 0*u.mm, atol=1*u.mm) # test round trip via ITRS tete_rt = tete_coo1.transform_to(ITRS(obstime=time)).transform_to(tete_coo1) assert_allclose(tete_rt.separation_3d(tete_coo1), 0*u.mm, atol=1*u.mm) # ensure deprecated routine remains consistent # make sure test raises warning! with pytest.warns(AstropyDeprecationWarning, match='The use of'): tete_alt = _apparent_position_in_true_coordinates(moon) assert_allclose(tete_coo1.separation_3d(tete_alt), 0*u.mm, atol=100*u.mm) def test_straight_overhead(): """ With a precise CIRS<->Observed transformation this should give Alt=90 exactly If the CIRS self-transform breaks it won't, due to improper treatment of aberration """ t = Time('J2010') obj = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km) home = EarthLocation(-1*u.deg, 52*u.deg, height=0.*u.km) # An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER. # Note, this won't be overhead for a topocentric observer because of # aberration. cirs_geo = obj.get_itrs(t).transform_to(CIRS(obstime=t)) # now get the Geocentric CIRS position of observatory obsrepr = home.get_itrs(t).transform_to(CIRS(obstime=t)).cartesian # topocentric CIRS position of a straight overhead object cirs_repr = cirs_geo.cartesian - obsrepr # create a CIRS object that appears straight overhead for a TOPOCENTRIC OBSERVER topocentric_cirs_frame = CIRS(obstime=t, location=home) cirs_topo = topocentric_cirs_frame.realize_frame(cirs_repr) # Check AltAz (though Azimuth can be anything so is not tested). aa = cirs_topo.transform_to(AltAz(obstime=t, location=home)) assert_allclose(aa.alt, 90*u.deg, atol=1*u.uas, rtol=0) # Check HADec. hd = cirs_topo.transform_to(HADec(obstime=t, location=home)) assert_allclose(hd.ha, 0*u.hourangle, atol=1*u.uas, rtol=0) assert_allclose(hd.dec, 52*u.deg, atol=1*u.uas, rtol=0) def test_itrs_straight_overhead(): """ With a precise ITRS<->Observed transformation this should give Alt=90 exactly """ t = Time('J2010') obj = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km) home = EarthLocation(-1*u.deg, 52*u.deg, height=0.*u.km) # An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER. 
itrs_geo = obj.get_itrs(t).cartesian # now get the Geocentric ITRS position of observatory obsrepr = home.get_itrs(t).cartesian # topocentric ITRS position of a straight overhead object itrs_repr = itrs_geo - obsrepr # create a ITRS object that appears straight overhead for a TOPOCENTRIC OBSERVER itrs_topo = ITRS(itrs_repr, obstime=t, location=home) # Check AltAz (though Azimuth can be anything so is not tested). aa = itrs_topo.transform_to(AltAz(obstime=t, location=home)) assert_allclose(aa.alt, 90*u.deg, atol=1*u.uas, rtol=0) # Check HADec. hd = itrs_topo.transform_to(HADec(obstime=t, location=home)) assert_allclose(hd.ha, 0*u.hourangle, atol=1*u.uas, rtol=0) assert_allclose(hd.dec, 52*u.deg, atol=1*u.uas, rtol=0) def jplephem_ge(minversion): """Check if jplephem is installed and has version >= minversion.""" # This is a separate routine since somehow with pyinstaller the stanza # not HAS_JPLEPHEM or metadata.version('jplephem') < '2.15' # leads to a module not found error. try: return HAS_JPLEPHEM and metadata.version('jplephem') >= minversion except Exception: return False @pytest.mark.remote_data @pytest.mark.skipif(not jplephem_ge('2.15'), reason='requires jplephem >= 2.15') def test_aa_hd_high_precision(): """These tests are provided by @mkbrewer - see issue #10356. The code that produces them agrees very well (<0.5 mas) with SkyField once Polar motion is turned off, but SkyField does not include polar motion, so a comparison to Skyfield or JPL Horizons will be ~1" off. The absence of polar motion within Skyfield and the disagreement between Skyfield and Horizons make high precision comparisons to those codes difficult. Updated 2020-11-29, after the comparison between codes became even better, down to 100 nas. NOTE: the agreement reflects consistency in approach between two codes, not necessarily absolute precision. If this test starts failing, the tolerance can and should be weakened *if* it is clear that the change is due to an improvement (e.g., a new IAU precession model). """ lat = -22.959748*u.deg lon = -67.787260*u.deg elev = 5186*u.m loc = EarthLocation.from_geodetic(lon, lat, elev) # Note: at this level of precision for the comparison, we have to include # the location in the time, as it influences the transformation to TDB. t = Time('2017-04-06T00:00:00.0', location=loc) with solar_system_ephemeris.set('de430'): moon = get_body('moon', t, loc) moon_aa = moon.transform_to(AltAz(obstime=t, location=loc)) moon_hd = moon.transform_to(HADec(obstime=t, location=loc)) # Numbers from # https://github.com/astropy/astropy/pull/11073#issuecomment-735486271 # updated in https://github.com/astropy/astropy/issues/11683 TARGET_AZ, TARGET_EL = 15.032673509956*u.deg, 50.303110133923*u.deg TARGET_DISTANCE = 376252883.247239*u.m assert_allclose(moon_aa.az, TARGET_AZ, atol=0.1*u.uas, rtol=0) assert_allclose(moon_aa.alt, TARGET_EL, atol=0.1*u.uas, rtol=0) assert_allclose(moon_aa.distance, TARGET_DISTANCE, atol=0.1*u.mm, rtol=0) ha, dec = erfa.ae2hd(moon_aa.az.to_value(u.radian), moon_aa.alt.to_value(u.radian), lat.to_value(u.radian)) ha = u.Quantity(ha, u.radian, copy=False) dec = u.Quantity(dec, u.radian, copy=False) assert_allclose(moon_hd.ha, ha, atol=0.1*u.uas, rtol=0) assert_allclose(moon_hd.dec, dec, atol=0.1*u.uas, rtol=0) def test_aa_high_precision_nodata(): """ These tests are designed to ensure high precision alt-az transforms. They are a slight fudge since the target values come from astropy itself. 
They are generated with a version of the code that passes the tests above, but for the internal solar system ephemerides to avoid the use of remote data. """ # Last updated when switching to erfa 2.0.0 and its moon98 function. TARGET_AZ, TARGET_EL = 15.03231495*u.deg, 50.3027193*u.deg lat = -22.959748*u.deg lon = -67.787260*u.deg elev = 5186*u.m loc = EarthLocation.from_geodetic(lon, lat, elev) t = Time('2017-04-06T00:00:00.0') moon = get_body('moon', t, loc) moon_aa = moon.transform_to(AltAz(obstime=t, location=loc)) assert_allclose(moon_aa.az - TARGET_AZ, 0*u.mas, atol=0.5*u.mas) assert_allclose(moon_aa.alt - TARGET_EL, 0*u.mas, atol=0.5*u.mas) class TestGetLocationGCRS: # TETE and CIRS use get_location_gcrs to get obsgeoloc and obsgeovel # with knowledge of some of the matrices. Check that this is consistent # with a direct transformation. def setup_class(cls): cls.loc = loc = EarthLocation.from_geodetic( np.linspace(0, 360, 6)*u.deg, np.linspace(-90, 90, 6)*u.deg, 100*u.m) cls.obstime = obstime = Time(np.linspace(2000, 2010, 6), format='jyear') # Get comparison via a full transformation. We do not use any methods # of EarthLocation, since those depend on the fast transform. loc_itrs = ITRS(loc.x, loc.y, loc.z, obstime=obstime) zeros = np.broadcast_to(0. * (u.km / u.s), (3,) + loc_itrs.shape, subok=True) loc_itrs.data.differentials['s'] = CartesianDifferential(zeros) loc_gcrs_cart = loc_itrs.transform_to(GCRS(obstime=obstime)).cartesian cls.obsgeoloc = loc_gcrs_cart.without_differentials() cls.obsgeovel = loc_gcrs_cart.differentials['s'].to_cartesian() def check_obsgeo(self, obsgeoloc, obsgeovel): assert_allclose(obsgeoloc.xyz, self.obsgeoloc.xyz, atol=.1*u.um, rtol=0.) assert_allclose(obsgeovel.xyz, self.obsgeovel.xyz, atol=.1*u.mm/u.s, rtol=0.) def test_get_gcrs_posvel(self): # Really just a sanity check self.check_obsgeo(*self.loc.get_gcrs_posvel(self.obstime)) def test_tete_quick(self): # Following copied from intermediate_rotation_transforms.gcrs_to_tete rbpn = erfa.pnm06a(*get_jd12(self.obstime, 'tt')) loc_gcrs_frame = get_location_gcrs(self.loc, self.obstime, tete_to_itrs_mat(self.obstime, rbpn=rbpn), rbpn) self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel) def test_cirs_quick(self): cirs_frame = CIRS(location=self.loc, obstime=self.obstime) # Following copied from intermediate_rotation_transforms.gcrs_to_cirs pmat = gcrs_to_cirs_mat(cirs_frame.obstime) loc_gcrs_frame = get_location_gcrs(self.loc, self.obstime, cirs_to_itrs_mat(cirs_frame.obstime), pmat) self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
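# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the topocentric-ITRS
# recipe exercised by test_itrs_straight_overhead above, written as a
# standalone snippet. The time and the two locations are arbitrary values
# chosen so that the object sits directly above the observing site.
import astropy.units as u
from astropy.coordinates import AltAz, EarthLocation, ITRS
from astropy.time import Time

t = Time('J2010')
obj = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km)   # object 10 km up
home = EarthLocation(-1*u.deg, 52*u.deg, height=0.*u.km)   # observing site

# Subtract the site's geocentric ITRS position from the object's to obtain a
# topocentric ITRS vector, then realize it in an ITRS frame tied to the site.
itrs_repr = obj.get_itrs(t).cartesian - home.get_itrs(t).cartesian
itrs_topo = ITRS(itrs_repr, obstime=t, location=home)

# Transforming to AltAz should place the object essentially at the zenith.
aa = itrs_topo.transform_to(AltAz(obstime=t, location=home))
print(aa.alt)  # ~90 deg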
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This includes tests for the Distance class and related calculations """ import pytest import numpy as np from numpy import testing as npt from astropy import units as u from astropy.units import allclose as quantity_allclose from astropy.coordinates import Longitude, Latitude, Distance, CartesianRepresentation from astropy.coordinates.builtin_frames import ICRS, Galactic from astropy.utils.exceptions import AstropyWarning from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa MULTIPLE_INPUTS_ERROR_MSG = "^more than one of `.*` were given to Distance constructor$" def test_distances(): """ Tests functionality for Coordinate class distances and cartesian transformations. """ ''' Distances can also be specified, and allow for a full 3D definition of a coordinate. ''' # try all the different ways to initialize a Distance distance = Distance(12, u.parsec) Distance(40, unit=u.au) Distance(value=5, unit=u.kpc) # need to provide a unit with pytest.raises(u.UnitsError): Distance(12) with pytest.raises(ValueError, match='none of `value`, `z`, `distmod`,'): Distance(unit=u.km) # standard units are pre-defined npt.assert_allclose(distance.lyr, 39.138765325702551) npt.assert_allclose(distance.km, 370281309776063.0) # Coordinate objects can be assigned a distance object, giving them a full # 3D position c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=Distance(12, u.parsec)) assert quantity_allclose(c.distance, 12 * u.pc) # or initialize distances via redshifts - this is actually tested in the # function below that checks for scipy. This is kept here as an example # c.distance = Distance(z=0.2) # uses current cosmology # with whatever your preferred cosmology may be # c.distance = Distance(z=0.2, cosmology=WMAP5) # Coordinate objects can be initialized with a distance using special # syntax c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc) # Coordinate objects can be instantiated with cartesian coordinates # Internally they will immediately be converted to two angles + a distance cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc) c2 = Galactic(cart) sep12 = c1.separation_3d(c2) # returns a *3d* distance between the c1 and c2 coordinates # not that this does *not* assert isinstance(sep12, Distance) npt.assert_allclose(sep12.pc, 12005.784163916317, 10) ''' All spherical coordinate systems with distances can be converted to cartesian coordinates. 
''' cartrep2 = c2.cartesian assert isinstance(cartrep2.x, u.Quantity) npt.assert_allclose(cartrep2.x.value, 2) npt.assert_allclose(cartrep2.y.value, 4) npt.assert_allclose(cartrep2.z.value, 8) # with no distance, the unit sphere is assumed when converting to cartesian c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None) unitcart = c3.cartesian npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 + unitcart.z**2)**0.5).value, 1.0) # TODO: choose between these when CartesianRepresentation gets a definite # decision on whether or not it gets __add__ # # CartesianRepresentation objects can be added and subtracted, which are # vector/elementwise they can also be given as arguments to a coordinate # system # csum = ICRS(c1.cartesian + c2.cartesian) csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz) csum = ICRS(csumrep) npt.assert_allclose(csumrep.x.value, -8.12016610185) npt.assert_allclose(csumrep.y.value, 3.19380597435) npt.assert_allclose(csumrep.z.value, -8.2294483707) npt.assert_allclose(csum.ra.degree, 158.529401774) npt.assert_allclose(csum.dec.degree, -43.3235825777) npt.assert_allclose(csum.distance.kpc, 11.9942200501) @pytest.mark.skipif('not HAS_SCIPY') def test_distances_scipy(): """ The distance-related tests that require scipy due to the cosmology module needing scipy integration routines """ from astropy.cosmology import WMAP5 # try different ways to initialize a Distance d4 = Distance(z=0.23) # uses default cosmology - as of writing, WMAP7 npt.assert_allclose(d4.z, 0.23, rtol=1e-8) d5 = Distance(z=0.23, cosmology=WMAP5) npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8) d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km) npt.assert_allclose(d6.value, 3.5417046898762366e+22) with pytest.raises(ValueError, match='a `cosmology` was given but `z`'): Distance(parallax=1*u.mas, cosmology=WMAP5) # Regression test for #12531 with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG): Distance(z=0.23, parallax=1*u.mas) # vectors! regression test for #11949 d4 = Distance(z=[0.23, 0.45]) # as of writing, Planck18 npt.assert_allclose(d4.z, [0.23, 0.45], rtol=1e-8) def test_distance_change(): ra = Longitude("4:08:15.162342", unit=u.hour) dec = Latitude("-41:08:15.162342", unit=u.degree) c1 = ICRS(ra, dec, Distance(1, unit=u.kpc)) oldx = c1.cartesian.x.value assert (oldx - 0.35284083171901953) < 1e-10 # first make sure distances are immutable with pytest.raises(AttributeError): c1.distance = Distance(2, unit=u.kpc) # now x should increase with a bigger distance increases c2 = ICRS(ra, dec, Distance(2, unit=u.kpc)) assert c2.cartesian.x.value == oldx * 2 def test_distance_is_quantity(): """ test that distance behaves like a proper quantity """ Distance(2 * u.kpc) d = Distance([2, 3.1], u.kpc) assert d.shape == (2,) a = d.view(np.ndarray) q = d.view(u.Quantity) a[0] = 1.2 q.value[1] = 5.4 assert d[0].value == 1.2 assert d[1].value == 5.4 q = u.Quantity(d, copy=True) q.value[1] = 0 assert q.value[1] == 0 assert d.value[1] != 0 # regression test against #2261 d = Distance([2 * u.kpc, 250. 
* u.pc]) assert d.unit is u.kpc assert np.all(d.value == np.array([2., 0.25])) def test_distmod(): d = Distance(10, u.pc) assert d.distmod.value == 0 d = Distance(distmod=20) assert d.distmod.value == 20 assert d.kpc == 100 d = Distance(distmod=-1., unit=u.au) npt.assert_allclose(d.value, 1301442.9440836983) with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG): d = Distance(value=d, distmod=20) with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG): d = Distance(z=.23, distmod=20) # check the Mpc/kpc/pc behavior assert Distance(distmod=1).unit == u.pc assert Distance(distmod=11).unit == u.kpc assert Distance(distmod=26).unit == u.Mpc assert Distance(distmod=-21).unit == u.AU # if an array, uses the mean of the log of the distances assert Distance(distmod=[1, 11, 26]).unit == u.kpc def test_parallax(): d = Distance(parallax=1*u.arcsecond) assert d.pc == 1. with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG): d = Distance(15*u.pc, parallax=20*u.milliarcsecond) with pytest.raises(ValueError, match=MULTIPLE_INPUTS_ERROR_MSG): d = Distance(parallax=20*u.milliarcsecond, distmod=20) # array plx = [1, 10, 100.]*u.mas d = Distance(parallax=plx) assert quantity_allclose(d.pc, [1000., 100., 10.]) assert quantity_allclose(plx, d.parallax) error_message = ( r"^some parallaxes are negative, which are not interpretable as distances\. " ) with pytest.raises(ValueError, match=error_message): Distance(parallax=-1 * u.mas) with pytest.raises(ValueError, match=error_message): Distance(parallax=[10, 1, -1] * u.mas) warning_message = "^negative parallaxes are converted to NaN distances even when" with pytest.warns(AstropyWarning, match=warning_message): Distance(parallax=-1 * u.mas, allow_negative=True) with pytest.warns(AstropyWarning, match=warning_message): Distance(parallax=[10, 1, -1] * u.mas, allow_negative=True) # Regression test for #12569; `unit` was ignored if `parallax` was given. d = Distance(parallax=1*u.mas, unit=u.kpc) assert d.value == 1. assert d.unit is u.kpc def test_distance_in_coordinates(): """ test that distances can be created from quantities and that cartesian representations come out right """ ra = Longitude("4:08:15.162342", unit=u.hour) dec = Latitude("-41:08:15.162342", unit=u.degree) coo = ICRS(ra, dec, distance=2*u.kpc) cart = coo.cartesian assert isinstance(cart.xyz, u.Quantity) def test_negative_distance(): """ Test optional kwarg allow_negative """ error_message = ( r"^distance must be >= 0\. Use the argument `allow_negative=True` to allow " r"negative values\.$") with pytest.raises(ValueError, match=error_message): Distance([-2, 3.1], u.kpc) with pytest.raises(ValueError, match=error_message): Distance([-2, -3.1], u.kpc) with pytest.raises(ValueError, match=error_message): Distance(-2, u.kpc) d = Distance(-2, u.kpc, allow_negative=True) assert d.value == -2 def test_distance_comparison(): """Ensure comparisons of distances work (#2206, #2250)""" a = Distance(15*u.kpc) b = Distance(15*u.kpc) assert a == b c = Distance(1.*u.Mpc) assert a < c def test_distance_to_quantity_when_not_units_of_length(): """Any operation that leaves units other than those of length should turn a distance into a quantity (#2206, #2250)""" d = Distance(15*u.kpc) twice = 2.*d assert isinstance(twice, Distance) area = 4.*np.pi*d**2 assert area.unit.is_equivalent(u.m**2) assert not isinstance(area, Distance) assert type(area) is u.Quantity def test_distance_nan(): # Check that giving NaNs to Distance doesn't emit a warning Distance([0, np.nan, 1] * u.m)
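# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module): the main Distance
# constructor paths exercised above, with arbitrary example values.
import numpy as np
import astropy.units as u
from astropy.coordinates import Distance

# From a distance modulus: d = 10**(distmod/5 + 1) pc, so distmod=20 -> 100 kpc.
d1 = Distance(distmod=20)
assert np.isclose(d1.to_value(u.kpc), 100)

# From a parallax: d[pc] = 1 / parallax[arcsec], so 10 mas -> 100 pc.
d2 = Distance(parallax=10*u.mas)
assert np.isclose(d2.to_value(u.pc), 100)

# Mutually exclusive inputs (e.g. redshift together with a distance modulus)
# raise a ValueError before any cosmological calculation is attempted.
try:
    Distance(z=0.23, distmod=20)
except ValueError:
    pass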
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Facilities for diffing two FITS files. Includes objects for diffing entire FITS files, individual HDUs, FITS headers, or just FITS data. Used to implement the fitsdiff program. """ import fnmatch import glob import io import operator import os import os.path import textwrap from collections import defaultdict from inspect import signature from itertools import islice import numpy as np from astropy import __version__ from .card import Card, BLANK_CARD from .header import Header # HDUList is used in one of the doctests from .hdu.hdulist import fitsopen, HDUList # pylint: disable=W0611 from .hdu.table import _TableLikeHDU from astropy.utils.diff import (report_diff_values, fixed_width_indent, where_not_allclose, diff_values) from astropy.utils.misc import NOT_OVERWRITING_MSG __all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff', 'TableDataDiff'] # Column attributes of interest for comparison _COL_ATTRS = [('unit', 'units'), ('null', 'null values'), ('bscale', 'bscales'), ('bzero', 'bzeros'), ('disp', 'display formats'), ('dim', 'dimensions')] class _BaseDiff: """ Base class for all FITS diff objects. When instantiating a FITS diff object, the first two arguments are always the two objects to diff (two FITS files, two FITS headers, etc.). Instantiating a ``_BaseDiff`` also causes the diff itself to be executed. The returned ``_BaseDiff`` instance has a number of attribute that describe the results of the diff operation. The most basic attribute, present on all ``_BaseDiff`` instances, is ``.identical`` which is `True` if the two objects being compared are identical according to the diff method for objects of that type. """ def __init__(self, a, b): """ The ``_BaseDiff`` class does not implement a ``_diff`` method and should not be instantiated directly. Instead instantiate the appropriate subclass of ``_BaseDiff`` for the objects being compared (for example, use `HeaderDiff` to compare two `Header` objects. """ self.a = a self.b = b # For internal use in report output self._fileobj = None self._indent = 0 self._diff() def __bool__(self): """ A ``_BaseDiff`` object acts as `True` in a boolean context if the two objects compared are identical. Otherwise it acts as `False`. """ return not self.identical @classmethod def fromdiff(cls, other, a, b): """ Returns a new Diff object of a specific subclass from an existing diff object, passing on the values for any arguments they share in common (such as ignore_keywords). For example:: >>> from astropy.io import fits >>> hdul1, hdul2 = fits.HDUList(), fits.HDUList() >>> headera, headerb = fits.Header(), fits.Header() >>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*']) >>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb) >>> list(hd.ignore_keywords) ['*'] """ sig = signature(cls.__init__) # The first 3 arguments of any Diff initializer are self, a, and b. kwargs = {} for arg in list(sig.parameters.keys())[3:]: if hasattr(other, arg): kwargs[arg] = getattr(other, arg) return cls(a, b, **kwargs) @property def identical(self): """ `True` if all the ``.diff_*`` attributes on this diff instance are empty, implying that no differences were found. Any subclass of ``_BaseDiff`` must have at least one ``.diff_*`` attribute, which contains a non-empty value if and only if some difference was found between the two objects being compared. 
""" return not any(getattr(self, attr) for attr in self.__dict__ if attr.startswith('diff_')) def report(self, fileobj=None, indent=0, overwrite=False): """ Generates a text report on the differences (if any) between two objects, and either returns it as a string or writes it to a file-like object. Parameters ---------- fileobj : file-like, string, or None, optional If `None`, this method returns the report as a string. Otherwise it returns `None` and writes the report to the given file-like object (which must have a ``.write()`` method at a minimum), or to a new file at the path specified. indent : int The number of 4 space tabs to indent the report. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. Returns ------- report : str or None """ return_string = False filepath = None if isinstance(fileobj, str): if os.path.exists(fileobj) and not overwrite: raise OSError(NOT_OVERWRITING_MSG.format(fileobj)) else: filepath = fileobj fileobj = open(filepath, 'w') elif fileobj is None: fileobj = io.StringIO() return_string = True self._fileobj = fileobj self._indent = indent # This is used internally by _writeln try: self._report() finally: if filepath: fileobj.close() if return_string: return fileobj.getvalue() def _writeln(self, text): self._fileobj.write(fixed_width_indent(text, self._indent) + '\n') def _diff(self): raise NotImplementedError def _report(self): raise NotImplementedError class FITSDiff(_BaseDiff): """Diff two FITS files by filename, or two `HDUList` objects. `FITSDiff` objects have the following diff attributes: - ``diff_hdu_count``: If the FITS files being compared have different numbers of HDUs, this contains a 2-tuple of the number of HDUs in each file. - ``diff_hdus``: If any HDUs with the same index are different, this contains a list of 2-tuples of the HDU index and the `HDUDiff` object representing the differences between the two HDUs. """ def __init__(self, a, b, ignore_hdus=[], ignore_keywords=[], ignore_comments=[], ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True): """ Parameters ---------- a : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object. b : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object to compare to the first file. ignore_hdus : sequence, optional HDU names to ignore when comparing two FITS files or HDU lists; the presence of these HDUs and their contents are ignored. Wildcard strings may also be included in the list. ignore_keywords : sequence, optional Header keywords to ignore when comparing two headers; the presence of these keywords and their values are ignored. Wildcard strings may also be included in the list. ignore_comments : sequence, optional A list of header keywords whose comments should be ignored in the comparison. May contain wildcard strings as with ignore_keywords. ignore_fields : sequence, optional The (case-insensitive) names of any table columns to ignore if any table data is to be compared. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). 
rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionadded:: 2.0 atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 ignore_blanks : bool, optional Ignore extra whitespace at the end of string values either in headers or data. Extra leading whitespace is not ignored (default: True). ignore_blank_cards : bool, optional Ignore all cards that are blank, i.e. they only contain whitespace (default: True). """ if isinstance(a, (str, os.PathLike)): try: a = fitsopen(a) except Exception as exc: raise OSError("error opening file a ({}): {}: {}".format( a, exc.__class__.__name__, exc.args[0])) close_a = True else: close_a = False if isinstance(b, (str, os.PathLike)): try: b = fitsopen(b) except Exception as exc: raise OSError("error opening file b ({}): {}: {}".format( b, exc.__class__.__name__, exc.args[0])) close_b = True else: close_b = False # Normalize keywords/fields to ignore to upper case self.ignore_hdus = {k.upper() for k in ignore_hdus} self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.ignore_fields = {k.upper() for k in ignore_fields} self.numdiffs = numdiffs self.rtol = rtol self.atol = atol self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards # Some hdu names may be pattern wildcards. Find them. self.ignore_hdu_patterns = set() for name in list(self.ignore_hdus): if name != '*' and glob.has_magic(name): self.ignore_hdus.remove(name) self.ignore_hdu_patterns.add(name) self.diff_hdu_count = () self.diff_hdus = [] try: super().__init__(a, b) finally: if close_a: a.close() if close_b: b.close() def _diff(self): if len(self.a) != len(self.b): self.diff_hdu_count = (len(self.a), len(self.b)) # Record filenames for use later in _report self.filenamea = self.a.filename() if not self.filenamea: self.filenamea = f'<{self.a.__class__.__name__} object at {id(self.a):#x}>' self.filenameb = self.b.filename() if not self.filenameb: self.filenameb = f'<{self.b.__class__.__name__} object at {id(self.b):#x}>' if self.ignore_hdus: self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus]) self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus]) if self.ignore_hdu_patterns: a_names = [hdu.name for hdu in self.a] b_names = [hdu.name for hdu in self.b] for pattern in self.ignore_hdu_patterns: self.a = HDUList([h for h in self.a if h.name not in fnmatch.filter( a_names, pattern)]) self.b = HDUList([h for h in self.b if h.name not in fnmatch.filter( b_names, pattern)]) # For now, just compare the extensions one by one in order. # Might allow some more sophisticated types of diffing later. 
# TODO: Somehow or another simplify the passing around of diff # options--this will become important as the number of options grows for idx in range(min(len(self.a), len(self.b))): hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx]) if not hdu_diff.identical: if self.a[idx].name == self.b[idx].name and self.a[idx].ver == self.b[idx].ver: self.diff_hdus.append((idx, hdu_diff, self.a[idx].name, self.a[idx].ver)) else: self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver)) def _report(self): wrapper = textwrap.TextWrapper(initial_indent=' ', subsequent_indent=' ') self._fileobj.write('\n') self._writeln(f' fitsdiff: {__version__}') self._writeln(f' a: {self.filenamea}\n b: {self.filenameb}') if self.ignore_hdus: ignore_hdus = ' '.join(sorted(self.ignore_hdus)) self._writeln(f' HDU(s) not to be compared:\n{wrapper.fill(ignore_hdus)}') if self.ignore_hdu_patterns: ignore_hdu_patterns = ' '.join(sorted(self.ignore_hdu_patterns)) self._writeln(' HDU(s) not to be compared:\n{}' .format(wrapper.fill(ignore_hdu_patterns))) if self.ignore_keywords: ignore_keywords = ' '.join(sorted(self.ignore_keywords)) self._writeln(' Keyword(s) not to be compared:\n{}' .format(wrapper.fill(ignore_keywords))) if self.ignore_comments: ignore_comments = ' '.join(sorted(self.ignore_comments)) self._writeln(' Keyword(s) whose comments are not to be compared' ':\n{}'.format(wrapper.fill(ignore_comments))) if self.ignore_fields: ignore_fields = ' '.join(sorted(self.ignore_fields)) self._writeln(' Table column(s) not to be compared:\n{}' .format(wrapper.fill(ignore_fields))) self._writeln(' Maximum number of different data values to be ' 'reported: {}'.format(self.numdiffs)) self._writeln(' Relative tolerance: {}, Absolute tolerance: {}' .format(self.rtol, self.atol)) if self.diff_hdu_count: self._fileobj.write('\n') self._writeln('Files contain different numbers of HDUs:') self._writeln(f' a: {self.diff_hdu_count[0]}') self._writeln(f' b: {self.diff_hdu_count[1]}') if not self.diff_hdus: self._writeln('No differences found between common HDUs.') return elif not self.diff_hdus: self._fileobj.write('\n') self._writeln('No differences found.') return for idx, hdu_diff, extname, extver in self.diff_hdus: # print out the extension heading if idx == 0: self._fileobj.write('\n') self._writeln('Primary HDU:') else: self._fileobj.write('\n') if extname: self._writeln(f'Extension HDU {idx} ({extname}, {extver}):') else: self._writeln(f'Extension HDU {idx}:') hdu_diff.report(self._fileobj, indent=self._indent + 1) class HDUDiff(_BaseDiff): """ Diff two HDU objects, including their headers and their data (but only if both HDUs contain the same type of data (image, table, or unknown). `HDUDiff` objects have the following diff attributes: - ``diff_extnames``: If the two HDUs have different EXTNAME values, this contains a 2-tuple of the different extension names. - ``diff_extvers``: If the two HDUS have different EXTVER values, this contains a 2-tuple of the different extension versions. - ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this contains a 2-tuple of the different extension levels. - ``diff_extension_types``: If the two HDUs have different XTENSION values, this contains a 2-tuple of the different extension types. - ``diff_headers``: Contains a `HeaderDiff` object for the headers of the two HDUs. This will always contain an object--it may be determined whether the headers are different through ``diff_headers.identical``. 
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or `RawDataDiff` as appropriate for the data in the HDUs, and only if the two HDUs have non-empty data of the same type (`RawDataDiff` is used for HDUs containing non-empty data of an indeterminate type). """ def __init__(self, a, b, ignore_keywords=[], ignore_comments=[], ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True): """ Parameters ---------- a : BaseHDU An HDU object. b : BaseHDU An HDU object to compare to the first HDU object. ignore_keywords : sequence, optional Header keywords to ignore when comparing two headers; the presence of these keywords and their values are ignored. Wildcard strings may also be included in the list. ignore_comments : sequence, optional A list of header keywords whose comments should be ignored in the comparison. May contain wildcard strings as with ignore_keywords. ignore_fields : sequence, optional The (case-insensitive) names of any table columns to ignore if any table data is to be compared. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionadded:: 2.0 atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 ignore_blanks : bool, optional Ignore extra whitespace at the end of string values either in headers or data. Extra leading whitespace is not ignored (default: True). ignore_blank_cards : bool, optional Ignore all cards that are blank, i.e. they only contain whitespace (default: True). 
""" self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.ignore_fields = {k.upper() for k in ignore_fields} self.rtol = rtol self.atol = atol self.numdiffs = numdiffs self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards self.diff_extnames = () self.diff_extvers = () self.diff_extlevels = () self.diff_extension_types = () self.diff_headers = None self.diff_data = None super().__init__(a, b) def _diff(self): if self.a.name != self.b.name: self.diff_extnames = (self.a.name, self.b.name) if self.a.ver != self.b.ver: self.diff_extvers = (self.a.ver, self.b.ver) if self.a.level != self.b.level: self.diff_extlevels = (self.a.level, self.b.level) if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'): self.diff_extension_types = (self.a.header.get('XTENSION'), self.b.header.get('XTENSION')) self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(), self.b.header.copy()) if self.a.data is None or self.b.data is None: # TODO: Perhaps have some means of marking this case pass elif self.a.is_image and self.b.is_image: self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data) # Clean up references to (possibly) memmapped arrays so they can # be closed by .close() self.diff_data.a = None self.diff_data.b = None elif (isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU)): # TODO: Replace this if/when _BaseHDU grows a .is_table property self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data) # Clean up references to (possibly) memmapped arrays so they can # be closed by .close() self.diff_data.a = None self.diff_data.b = None elif not self.diff_extension_types: # Don't diff the data for unequal extension types that are not # recognized image or table types self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data) # Clean up references to (possibly) memmapped arrays so they can # be closed by .close() self.diff_data.a = None self.diff_data.b = None def _report(self): if self.identical: self._writeln(" No differences found.") if self.diff_extension_types: self._writeln(" Extension types differ:\n a: {}\n " "b: {}".format(*self.diff_extension_types)) if self.diff_extnames: self._writeln(" Extension names differ:\n a: {}\n " "b: {}".format(*self.diff_extnames)) if self.diff_extvers: self._writeln(" Extension versions differ:\n a: {}\n " "b: {}".format(*self.diff_extvers)) if self.diff_extlevels: self._writeln(" Extension levels differ:\n a: {}\n " "b: {}".format(*self.diff_extlevels)) if not self.diff_headers.identical: self._fileobj.write('\n') self._writeln(" Headers contain differences:") self.diff_headers.report(self._fileobj, indent=self._indent + 1) if self.diff_data is not None and not self.diff_data.identical: self._fileobj.write('\n') self._writeln(" Data contains differences:") self.diff_data.report(self._fileobj, indent=self._indent + 1) class HeaderDiff(_BaseDiff): """ Diff two `Header` objects. `HeaderDiff` objects have the following diff attributes: - ``diff_keyword_count``: If the two headers contain a different number of keywords, this contains a 2-tuple of the keyword count for each header. - ``diff_keywords``: If either header contains one or more keywords that don't appear at all in the other header, this contains a 2-tuple consisting of a list of the keywords only appearing in header a, and a list of the keywords only appearing in header b. 
- ``diff_duplicate_keywords``: If a keyword appears in both headers at least once, but contains a different number of duplicates (for example, a different number of HISTORY cards in each header), an item is added to this dict with the keyword as the key, and a 2-tuple of the different counts of that keyword as the value. For example:: {'HISTORY': (20, 19)} means that header a contains 20 HISTORY cards, while header b contains only 19 HISTORY cards. - ``diff_keyword_values``: If any of the common keyword between the two headers have different values, they appear in this dict. It has a structure similar to ``diff_duplicate_keywords``, with the keyword as the key, and a 2-tuple of the different values as the value. For example:: {'NAXIS': (2, 3)} means that the NAXIS keyword has a value of 2 in header a, and a value of 3 in header b. This excludes any keywords matched by the ``ignore_keywords`` list. - ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains differences between keyword comments. `HeaderDiff` objects also have a ``common_keywords`` attribute that lists all keywords that appear in both headers. """ def __init__(self, a, b, ignore_keywords=[], ignore_comments=[], rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True): """ Parameters ---------- a : `~astropy.io.fits.Header` or string or bytes A header. b : `~astropy.io.fits.Header` or string or bytes A header to compare to the first header. ignore_keywords : sequence, optional Header keywords to ignore when comparing two headers; the presence of these keywords and their values are ignored. Wildcard strings may also be included in the list. ignore_comments : sequence, optional A list of header keywords whose comments should be ignored in the comparison. May contain wildcard strings as with ignore_keywords. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionadded:: 2.0 atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 ignore_blanks : bool, optional Ignore extra whitespace at the end of string values either in headers or data. Extra leading whitespace is not ignored (default: True). ignore_blank_cards : bool, optional Ignore all cards that are blank, i.e. they only contain whitespace (default: True). 
""" self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.rtol = rtol self.atol = atol self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards self.ignore_keyword_patterns = set() self.ignore_comment_patterns = set() for keyword in list(self.ignore_keywords): keyword = keyword.upper() if keyword != '*' and glob.has_magic(keyword): self.ignore_keywords.remove(keyword) self.ignore_keyword_patterns.add(keyword) for keyword in list(self.ignore_comments): keyword = keyword.upper() if keyword != '*' and glob.has_magic(keyword): self.ignore_comments.remove(keyword) self.ignore_comment_patterns.add(keyword) # Keywords appearing in each header self.common_keywords = [] # Set to the number of keywords in each header if the counts differ self.diff_keyword_count = () # Set if the keywords common to each header (excluding ignore_keywords) # appear in different positions within the header # TODO: Implement this self.diff_keyword_positions = () # Keywords unique to each header (excluding keywords in # ignore_keywords) self.diff_keywords = () # Keywords that have different numbers of duplicates in each header # (excluding keywords in ignore_keywords) self.diff_duplicate_keywords = {} # Keywords common to each header but having different values (excluding # keywords in ignore_keywords) self.diff_keyword_values = defaultdict(list) # Keywords common to each header but having different comments # (excluding keywords in ignore_keywords or in ignore_comments) self.diff_keyword_comments = defaultdict(list) if isinstance(a, str): a = Header.fromstring(a) if isinstance(b, str): b = Header.fromstring(b) if not (isinstance(a, Header) and isinstance(b, Header)): raise TypeError('HeaderDiff can only diff astropy.io.fits.Header ' 'objects or strings containing FITS headers.') super().__init__(a, b) # TODO: This doesn't pay much attention to the *order* of the keywords, # except in the case of duplicate keywords. The order should be checked # too, or at least it should be an option. 
def _diff(self): if self.ignore_blank_cards: cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD] cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD] else: cardsa = list(self.a.cards) cardsb = list(self.b.cards) # build dictionaries of keyword values and comments def get_header_values_comments(cards): values = {} comments = {} for card in cards: value = card.value if self.ignore_blanks and isinstance(value, str): value = value.rstrip() values.setdefault(card.keyword, []).append(value) comments.setdefault(card.keyword, []).append(card.comment) return values, comments valuesa, commentsa = get_header_values_comments(cardsa) valuesb, commentsb = get_header_values_comments(cardsb) # Normalize all keyword to upper-case for comparison's sake; # TODO: HIERARCH keywords should be handled case-sensitively I think keywordsa = {k.upper() for k in valuesa} keywordsb = {k.upper() for k in valuesb} self.common_keywords = sorted(keywordsa.intersection(keywordsb)) if len(cardsa) != len(cardsb): self.diff_keyword_count = (len(cardsa), len(cardsb)) # Any other diff attributes should exclude ignored keywords keywordsa = keywordsa.difference(self.ignore_keywords) keywordsb = keywordsb.difference(self.ignore_keywords) if self.ignore_keyword_patterns: for pattern in self.ignore_keyword_patterns: keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern)) keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern)) if '*' in self.ignore_keywords: # Any other differences between keywords are to be ignored return left_only_keywords = sorted(keywordsa.difference(keywordsb)) right_only_keywords = sorted(keywordsb.difference(keywordsa)) if left_only_keywords or right_only_keywords: self.diff_keywords = (left_only_keywords, right_only_keywords) # Compare count of each common keyword for keyword in self.common_keywords: if keyword in self.ignore_keywords: continue if self.ignore_keyword_patterns: skip = False for pattern in self.ignore_keyword_patterns: if fnmatch.fnmatch(keyword, pattern): skip = True break if skip: continue counta = len(valuesa[keyword]) countb = len(valuesb[keyword]) if counta != countb: self.diff_duplicate_keywords[keyword] = (counta, countb) # Compare keywords' values and comments for a, b in zip(valuesa[keyword], valuesb[keyword]): if diff_values(a, b, rtol=self.rtol, atol=self.atol): self.diff_keyword_values[keyword].append((a, b)) else: # If there are duplicate keywords we need to be able to # index each duplicate; if the values of a duplicate # are identical use None here self.diff_keyword_values[keyword].append(None) if not any(self.diff_keyword_values[keyword]): # No differences found; delete the array of Nones del self.diff_keyword_values[keyword] if '*' in self.ignore_comments or keyword in self.ignore_comments: continue if self.ignore_comment_patterns: skip = False for pattern in self.ignore_comment_patterns: if fnmatch.fnmatch(keyword, pattern): skip = True break if skip: continue for a, b in zip(commentsa[keyword], commentsb[keyword]): if diff_values(a, b): self.diff_keyword_comments[keyword].append((a, b)) else: self.diff_keyword_comments[keyword].append(None) if not any(self.diff_keyword_comments[keyword]): del self.diff_keyword_comments[keyword] def _report(self): if self.diff_keyword_count: self._writeln(' Headers have different number of cards:') self._writeln(f' a: {self.diff_keyword_count[0]}') self._writeln(f' b: {self.diff_keyword_count[1]}') if self.diff_keywords: for keyword in self.diff_keywords[0]: if keyword in Card._commentary_keywords: val = 
self.a[keyword][0] else: val = self.a[keyword] self._writeln(f' Extra keyword {keyword!r:8} in a: {val!r}') for keyword in self.diff_keywords[1]: if keyword in Card._commentary_keywords: val = self.b[keyword][0] else: val = self.b[keyword] self._writeln(f' Extra keyword {keyword!r:8} in b: {val!r}') if self.diff_duplicate_keywords: for keyword, count in sorted(self.diff_duplicate_keywords.items()): self._writeln(f' Inconsistent duplicates of keyword {keyword!r:8}:') self._writeln(' Occurs {} time(s) in a, {} times in (b)' .format(*count)) if self.diff_keyword_values or self.diff_keyword_comments: for keyword in self.common_keywords: report_diff_keyword_attr(self._fileobj, 'values', self.diff_keyword_values, keyword, ind=self._indent) report_diff_keyword_attr(self._fileobj, 'comments', self.diff_keyword_comments, keyword, ind=self._indent) # TODO: It might be good if there was also a threshold option for percentage of # different pixels: For example ignore if only 1% of the pixels are different # within some threshold. There are lots of possibilities here, but hold off # for now until specific cases come up. class ImageDataDiff(_BaseDiff): """ Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE extension HDU, though the data unit is assumed to be "pixels"). `ImageDataDiff` objects have the following diff attributes: - ``diff_dimensions``: If the two arrays contain either a different number of dimensions or different sizes in any dimension, this contains a 2-tuple of the shapes of each array. Currently no further comparison is performed on images that don't have the exact same dimensions. - ``diff_pixels``: If the two images contain any different pixels, this contains a list of 2-tuples of the array index where the difference was found, and another 2-tuple containing the different values. For example, if the pixel at (0, 0) contains different values this would look like:: [(0, 0), (1.1, 2.2)] where 1.1 and 2.2 are the values of that pixel in each array. This array only contains up to ``self.numdiffs`` differences, for storage efficiency. - ``diff_total``: The total number of different pixels found between the arrays. Although ``diff_pixels`` does not necessarily contain all the different pixel values, this can be used to get a count of the total number of differences found. - ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number of pixels in the arrays. """ def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0): """ Parameters ---------- a : BaseHDU An HDU object. b : BaseHDU An HDU object to compare to the first HDU object. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionadded:: 2.0 atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. 
versionadded:: 2.0 """ self.numdiffs = numdiffs self.rtol = rtol self.atol = atol self.diff_dimensions = () self.diff_pixels = [] self.diff_ratio = 0 # self.diff_pixels only holds up to numdiffs differing pixels; # self.diff_total stores the total count of differences between # the images, but not the different values self.diff_total = 0 super().__init__(a, b) def _diff(self): if self.a.shape != self.b.shape: self.diff_dimensions = (self.a.shape, self.b.shape) # Don't do any further comparison if the dimensions differ # TODO: Perhaps we could, however, diff just the intersection # between the two images return # Find the indices where the values are not equal # If neither a nor b are floating point (or complex), ignore rtol and # atol if not (np.issubdtype(self.a.dtype, np.inexact) or np.issubdtype(self.b.dtype, np.inexact)): rtol = 0 atol = 0 else: rtol = self.rtol atol = self.atol diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol) self.diff_total = len(diffs[0]) if self.diff_total == 0: # Then we're done return if self.numdiffs < 0: numdiffs = self.diff_total else: numdiffs = self.numdiffs self.diff_pixels = [(idx, (self.a[idx], self.b[idx])) for idx in islice(zip(*diffs), 0, numdiffs)] self.diff_ratio = float(self.diff_total) / float(len(self.a.flat)) def _report(self): if self.diff_dimensions: dimsa = ' x '.join(str(d) for d in reversed(self.diff_dimensions[0])) dimsb = ' x '.join(str(d) for d in reversed(self.diff_dimensions[1])) self._writeln(' Data dimensions differ:') self._writeln(f' a: {dimsa}') self._writeln(f' b: {dimsb}') # For now we don't do any further comparison if the dimensions # differ; though in the future it might be nice to be able to # compare at least where the images intersect self._writeln(' No further data comparison performed.') return if not self.diff_pixels: return for index, values in self.diff_pixels: index = [x + 1 for x in reversed(index)] self._writeln(f' Data differs at {index}:') report_diff_values(values[0], values[1], fileobj=self._fileobj, indent_width=self._indent + 1, rtol=self.rtol, atol=self.atol) if self.diff_total > self.numdiffs: self._writeln(' ...') self._writeln(' {} different pixels found ({:.2%} different).' .format(self.diff_total, self.diff_ratio)) class RawDataDiff(ImageDataDiff): """ `RawDataDiff` is just a special case of `ImageDataDiff` where the images are one-dimensional, and the data is treated as a 1-dimensional array of bytes instead of pixel values. This is used to compare the data of two non-standard extension HDUs that were not recognized as containing image or table data. `RawDataDiff` objects have the following diff attributes: - ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of `ImageDataDiff` objects. Though the "dimension" of each array is just an integer representing the number of bytes in the data. - ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff` objects, but renamed to reflect the minor semantic difference that these are raw bytes and not pixel values. Also the indices are integers instead of tuples. - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`. """ def __init__(self, a, b, numdiffs=10): """ Parameters ---------- a : BaseHDU An HDU object. b : BaseHDU An HDU object to compare to the first HDU object. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences.
Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). """ self.diff_dimensions = () self.diff_bytes = [] super().__init__(a, b, numdiffs=numdiffs) def _diff(self): super()._diff() if self.diff_dimensions: self.diff_dimensions = (self.diff_dimensions[0][0], self.diff_dimensions[1][0]) self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels] del self.diff_pixels def _report(self): if self.diff_dimensions: self._writeln(' Data sizes differ:') self._writeln(f' a: {self.diff_dimensions[0]} bytes') self._writeln(f' b: {self.diff_dimensions[1]} bytes') # For now we don't do any further comparison if the dimensions # differ; though in the future it might be nice to be able to # compare at least where the images intersect self._writeln(' No further data comparison performed.') return if not self.diff_bytes: return for index, values in self.diff_bytes: self._writeln(f' Data differs at byte {index}:') report_diff_values(values[0], values[1], fileobj=self._fileobj, indent_width=self._indent + 1, rtol=self.rtol, atol=self.atol) self._writeln(' ...') self._writeln(' {} different bytes found ({:.2%} different).' .format(self.diff_total, self.diff_ratio)) class TableDataDiff(_BaseDiff): """ Diff two table data arrays. It doesn't matter whether the data originally came from a binary or ASCII table--the data should be passed in as a recarray. `TableDataDiff` objects have the following diff attributes: - ``diff_column_count``: If the tables being compared have different numbers of columns, this contains a 2-tuple of the column count in each table. Even if the tables have different column counts, an attempt is still made to compare any columns they have in common. - ``diff_columns``: If either table contains columns unique to that table, either in name or format, this contains a 2-tuple of lists. The first element is a list of columns (these are full `Column` objects) that appear only in table a. The second element is a list of columns that appear only in table b. This only lists columns with different column definitions, and has nothing to do with the data in those columns. - ``diff_column_names``: This is like ``diff_columns``, but lists only the names of columns unique to either table, rather than the full `Column` objects. - ``diff_column_attributes``: Lists columns that are in both tables but have different secondary attributes, such as TUNIT or TDISP. The format is a list of 2-tuples: the first element is a tuple of the column name and the attribute, and the second is a tuple of the two different values. - ``diff_values``: `TableDataDiff` compares the data in each table on a column-by-column basis. If any different data is found, it is added to this list. The format of this list is similar to the ``diff_pixels`` attribute on `ImageDataDiff` objects, though the "index" consists of a (column_name, row) tuple. For example:: [('TARGET', 0), ('NGC1001', 'NGC1002')] shows that the tables contain different values in the 0-th row of the 'TARGET' column. - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`. `TableDataDiff` objects also have a ``common_columns`` attribute that lists the `Column` objects for columns that are identical in both tables, and a ``common_column_names`` attribute which contains a set of the names of those columns.
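Examples
--------
A minimal sketch comparing two small binary tables; the column name and
values below are invented for illustration::

    >>> from astropy.io import fits
    >>> from astropy.io.fits.diff import TableDataDiff
    >>> ca = fits.Column(name='TARGET', format='10A',
    ...                  array=['NGC1001', 'NGC1002'])
    >>> cb = fits.Column(name='TARGET', format='10A',
    ...                  array=['NGC1001', 'NGC1003'])
    >>> hdu_a = fits.BinTableHDU.from_columns([ca])
    >>> hdu_b = fits.BinTableHDU.from_columns([cb])
    >>> diff = TableDataDiff(hdu_a.data, hdu_b.data)
    >>> diff.identical
    False
    >>> diff.diff_total
    1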
""" def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0): """ Parameters ---------- a : BaseHDU An HDU object. b : BaseHDU An HDU object to compare to the first HDU object. ignore_fields : sequence, optional The (case-insensitive) names of any table columns to ignore if any table data is to be compared. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionadded:: 2.0 atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 """ self.ignore_fields = set(ignore_fields) self.numdiffs = numdiffs self.rtol = rtol self.atol = atol self.common_columns = [] self.common_column_names = set() # self.diff_columns contains columns with different column definitions, # but not different column data. Column data is only compared in # columns that have the same definitions self.diff_rows = () self.diff_column_count = () self.diff_columns = () # If two columns have the same name+format, but other attributes are # different (such as TUNIT or such) they are listed here self.diff_column_attributes = [] # Like self.diff_columns, but just contains a list of the column names # unique to each table, and in the order they appear in the tables self.diff_column_names = () self.diff_values = [] self.diff_ratio = 0 self.diff_total = 0 super().__init__(a, b) def _diff(self): # Much of the code for comparing columns is similar to the code for # comparing headers--consider refactoring colsa = self.a.columns colsb = self.b.columns if len(colsa) != len(colsb): self.diff_column_count = (len(colsa), len(colsb)) # Even if the number of columns are unequal, we still do comparison of # any common columns colsa = {c.name.lower(): c for c in colsa} colsb = {c.name.lower(): c for c in colsb} if '*' in self.ignore_fields: # If all columns are to be ignored, ignore any further differences # between the columns return # Keep the user's original ignore_fields list for reporting purposes, # but internally use a case-insensitive version ignore_fields = {f.lower() for f in self.ignore_fields} # It might be nice if there were a cleaner way to do this, but for now # it'll do for fieldname in ignore_fields: fieldname = fieldname.lower() if fieldname in colsa: del colsa[fieldname] if fieldname in colsb: del colsb[fieldname] colsa_set = set(colsa.values()) colsb_set = set(colsb.values()) self.common_columns = sorted(colsa_set.intersection(colsb_set), key=operator.attrgetter('name')) self.common_column_names = {col.name.lower() for col in self.common_columns} left_only_columns = {col.name.lower(): col for col in colsa_set.difference(colsb_set)} right_only_columns = {col.name.lower(): col for col in colsb_set.difference(colsa_set)} if left_only_columns or right_only_columns: self.diff_columns = (left_only_columns, right_only_columns) self.diff_column_names = ([], []) if left_only_columns: for col in self.a.columns: 
if col.name.lower() in left_only_columns: self.diff_column_names[0].append(col.name) if right_only_columns: for col in self.b.columns: if col.name.lower() in right_only_columns: self.diff_column_names[1].append(col.name) # If the tables have a different number of rows, we don't compare the # columns right now. # TODO: It might be nice to optionally compare the first n rows where n # is the minimum of the row counts between the two tables. if len(self.a) != len(self.b): self.diff_rows = (len(self.a), len(self.b)) return # If the tables contain no rows there's no data to compare, so we're # done at this point. (See ticket #178) if len(self.a) == len(self.b) == 0: return # Like in the old fitsdiff, compare tables on a column-by-column basis # The difficulty here is that, while FITS column names are meant to be # case-insensitive, Astropy still allows, for the sake of flexibility, # two columns with the same name but different case. When columns are # accessed in FITS tables, a case-sensitive match is tried first, and failing # that a case-insensitive match is made. # It's conceivable that the same column could appear in both tables # being compared, but with different case. # Though it *may* lead to inconsistencies in these rare cases, this # just assumes that there are no duplicated column names in either # table, and that the column names can be treated case-insensitively. for col in self.common_columns: name_lower = col.name.lower() if name_lower in ignore_fields: continue cola = colsa[name_lower] colb = colsb[name_lower] for attr, _ in _COL_ATTRS: vala = getattr(cola, attr, None) valb = getattr(colb, attr, None) if diff_values(vala, valb): self.diff_column_attributes.append( ((col.name.upper(), attr), (vala, valb))) arra = self.a[col.name] arrb = self.b[col.name] if (np.issubdtype(arra.dtype, np.floating) and np.issubdtype(arrb.dtype, np.floating)): diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol) elif 'P' in col.format: diffs = ([idx for idx in range(len(arra)) if not np.allclose(arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol)],) else: diffs = np.where(arra != arrb) self.diff_total += len(set(diffs[0])) if self.numdiffs >= 0: if len(self.diff_values) >= self.numdiffs: # Don't save any more diff values continue # Add no more diff'd values than this max_diffs = self.numdiffs - len(self.diff_values) else: max_diffs = len(diffs[0]) last_seen_idx = None for idx in islice(diffs[0], 0, max_diffs): if idx == last_seen_idx: # Skip duplicate indices, which may occur when the column # data contains multi-dimensional values; we're only # interested in storing row-by-row differences continue last_seen_idx = idx self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx]))) total_values = len(self.a) * len(self.a.dtype.fields) self.diff_ratio = float(self.diff_total) / float(total_values) def _report(self): if self.diff_column_count: self._writeln(' Tables have different number of columns:') self._writeln(f' a: {self.diff_column_count[0]}') self._writeln(f' b: {self.diff_column_count[1]}') if self.diff_column_names: # Show columns with names unique to either table for name in self.diff_column_names[0]: format = self.diff_columns[0][name.lower()].format self._writeln(f' Extra column {name} of format {format} in a') for name in self.diff_column_names[1]: format = self.diff_columns[1][name.lower()].format self._writeln(f' Extra column {name} of format {format} in b') col_attrs = dict(_COL_ATTRS) # Now go through each table again and show columns with common # names but other
property differences... for col_attr, vals in self.diff_column_attributes: name, attr = col_attr self._writeln(f' Column {name} has different {col_attrs[attr]}:') report_diff_values(vals[0], vals[1], fileobj=self._fileobj, indent_width=self._indent + 1, rtol=self.rtol, atol=self.atol) if self.diff_rows: self._writeln(' Table rows differ:') self._writeln(f' a: {self.diff_rows[0]}') self._writeln(f' b: {self.diff_rows[1]}') self._writeln(' No further data comparison performed.') return if not self.diff_values: return # Finally, let's go through and report column data differences: for indx, values in self.diff_values: self._writeln(' Column {} data differs in row {}:'.format(*indx)) report_diff_values(values[0], values[1], fileobj=self._fileobj, indent_width=self._indent + 1, rtol=self.rtol, atol=self.atol) if self.diff_values and self.numdiffs < self.diff_total: self._writeln(' ...{} additional difference(s) found.'.format( self.diff_total - self.numdiffs)) if self.diff_total > self.numdiffs: self._writeln(' ...') self._writeln(' {} different table data element(s) found ' '({:.2%} different).' .format(self.diff_total, self.diff_ratio)) def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0): """ Write a diff between two header keyword values or comments to the specified file-like object. """ if keyword in diffs: vals = diffs[keyword] for idx, val in enumerate(vals): if val is None: continue if idx == 0: dup = '' else: dup = f'[{idx + 1}]' fileobj.write( fixed_width_indent(' Keyword {:8}{} has different {}:\n' .format(keyword, dup, attr), ind)) report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
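# Usage sketch (illustrative only, not part of the module): the data diff
# classes above are normally driven by HDUDiff/FITSDiff, but they can also be
# instantiated directly on the HDU ``.data`` arrays. The array values below
# are invented for illustration:
#
#     >>> import numpy as np
#     >>> from astropy.io.fits.diff import ImageDataDiff
#     >>> a = np.zeros((2, 2))
#     >>> b = a.copy()
#     >>> b[0, 0] = 1.5
#     >>> diff = ImageDataDiff(a, b, rtol=1e-6)
#     >>> diff.identical
#     False
#     >>> diff.diff_total
#     1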
4c7b8eeda2d23eefc444c06d99e7f5fc728db016c9941911ebf3a7397925bf33
# Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import operator import re import sys import warnings import weakref import numbers from functools import reduce from collections import OrderedDict from contextlib import suppress import numpy as np from numpy import char as chararray from .card import Card, CARD_LENGTH from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp, NotifierMixin) from .verify import VerifyError, VerifyWarning from astropy.utils import lazyproperty, isiterable, indent from astropy.utils.exceptions import AstropyUserWarning __all__ = ['Column', 'ColDefs', 'Delayed'] # mapping from TFORM data type to numpy data type (code) # L: Logical (Boolean) # B: Unsigned Byte # I: 16-bit Integer # J: 32-bit Integer # K: 64-bit Integer # E: Single-precision Floating Point # D: Double-precision Floating Point # C: Single-precision Complex # M: Double-precision Complex # A: Character FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4', 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'} # the inverse dictionary of the above NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()} # Normally booleans are represented as ints in Astropy, but if passed in a numpy # boolean array, that should be supported NUMPY2FITS['b1'] = 'L' # Add unsigned types, which will be stored as signed ints with a TZERO card. NUMPY2FITS['u2'] = 'I' NUMPY2FITS['u4'] = 'J' NUMPY2FITS['u8'] = 'K' # Add half precision floating point numbers which will be up-converted to # single precision. NUMPY2FITS['f2'] = 'E' # This is the order in which values are converted to FITS types # Note that only double precision floating point/complex are supported FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A'] # Convert single precision floating point/complex to double precision. FITSUPCONVERTERS = {'E': 'D', 'C': 'M'} # mapping from ASCII table TFORM data type to numpy data type # A: Character # I: Integer (32-bit) # J: Integer (64-bit; non-standard) # F: Float (64-bit; fixed decimal notation) # E: Float (64-bit; exponential notation) # D: Float (64-bit; exponential notation, always 64-bit by convention) ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'} # Maps FITS ASCII column format codes to the appropriate Python string # formatting codes for that type. ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'} # For each ASCII table format code, provides a default width (and decimal # precision) for when one isn't given explicitly in the column format ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0), 'E': (15, 7), 'F': (16, 7), 'D': (25, 17)} # TDISPn for both ASCII and Binary tables TDISP_RE_DICT = {} TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}' r'(?P<precision>[0-9])+)+)|') TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \ re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|') TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \ TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \ re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)' r'(?:\.{0,1}(?P<precision>[0-9]+))?))|') TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \ TDISP_RE_DICT['D'] = \ re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.' 
r'(?P<precision>[0-9]+))+)' r'(?:E{0,1}(?P<exponential>[0-9]+)?)|') TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \ re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}' r'(?P<precision>[0-9])+)+)') # mapping from TDISP format to python format # A: Character # L: Logical (Boolean) # I: 16-bit Integer # Can't predefine zero padding and space padding before hand without # knowing the value being formatted, so grabbing precision and using that # to zero pad, ignoring width. Same with B, O, and Z # B: Binary Integer # O: Octal Integer # Z: Hexadecimal Integer # F: Float (64-bit; fixed decimal notation) # EN: Float (engineering fortran format, exponential multiple of thee # ES: Float (scientific, same as EN but non-zero leading digit # E: Float, exponential notation # Can't get exponential restriction to work without knowing value # before hand, so just using width and precision, same with D, G, EN, and # ES formats # D: Double-precision Floating Point with exponential # (E but for double precision) # G: Double-precision Floating Point, may or may not show exponent TDISP_FMT_DICT = { 'I': '{{:{width}d}}', 'B': '{{:{width}b}}', 'O': '{{:{width}o}}', 'Z': '{{:{width}x}}', 'F': '{{:{width}.{precision}f}}', 'G': '{{:{width}.{precision}g}}' } TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}' TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \ TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}' # tuple of column/field definition common names and keyword names, make # sure to preserve the one-to-one correspondence when updating the list(s). # Use lists, instead of dictionaries so the names can be displayed in a # preferred order. KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS') KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim', 'coord_type', 'coord_unit', 'coord_ref_point', 'coord_ref_value', 'coord_inc', 'time_ref_pos') """This is a list of the attributes that can be set on `Column` objects.""" KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES)) ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES)) # TODO: Define a list of default comments to associate with each table keyword # TFORMn regular expression TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])' r'(?P<option>[!-~]*)', re.I) # TFORMn for ASCII tables; two different versions depending on whether # the format is floating-point or not; allows empty values for width # in which case defaults are used TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|' r'(?:(?P<formatf>[FED])' r'(?:(?P<widthf>[0-9]+)(?:\.' r'(?P<precision>[0-9]+))?)?)') TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+') """ Regular expression for valid table column names. See FITS Standard v3.0 section 7.2.2. """ # table definition keyword regular expression TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') # table dimension keyword regular expression (fairly flexible with whitespace) TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*') # value for ASCII table cell with value = TNULL # this can be reset by user. 
ASCIITNULL = 0 # The default placeholder to use for NULL values in ASCII tables when # converting from binary to ASCII tables DEFAULT_ASCII_TNULL = '---' class Delayed: """Delayed file-reading data.""" def __init__(self, hdu=None, field=None): self.hdu = weakref.proxy(hdu) self.field = field def __getitem__(self, key): # This forces the data for the HDU to be read, which will replace # the corresponding Delayed objects in the Tables Columns to be # transformed into ndarrays. It will also return the value of the # requested data element. return self.hdu.data[key][self.field] class _BaseColumnFormat(str): """ Base class for binary table column formats (just called _ColumnFormat) and ASCII table column formats (_AsciiColumnFormat). """ def __eq__(self, other): if not other: return False if isinstance(other, str): if not isinstance(other, self.__class__): try: other = self.__class__(other) except ValueError: return False else: return False return self.canonical == other.canonical def __hash__(self): return hash(self.canonical) @lazyproperty def dtype(self): """ The Numpy dtype object created from the format's associated recformat. """ return np.dtype(self.recformat) @classmethod def from_column_format(cls, format): """Creates a column format object from another column format object regardless of their type. That is, this can convert a _ColumnFormat to an _AsciiColumnFormat or vice versa at least in cases where a direct translation is possible. """ return cls.from_recformat(format.recformat) class _ColumnFormat(_BaseColumnFormat): """ Represents a FITS binary table column format. This is an enhancement over using a normal string for the format, since the repeat count, format code, and option are available as separate attributes, and smart comparison is used. For example 1J == J. """ def __new__(cls, format): self = super().__new__(cls, format) self.repeat, self.format, self.option = _parse_tformat(format) self.format = self.format.upper() if self.format in ('P', 'Q'): # TODO: There should be a generic factory that returns either # _FormatP or _FormatQ as appropriate for a given TFORMn if self.format == 'P': recformat = _FormatP.from_tform(format) else: recformat = _FormatQ.from_tform(format) # Format of variable length arrays self.p_format = recformat.format else: self.p_format = None return self @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of rTa where T is the single character data type code, a is the optional part, and r is the repeat. If repeat == 1 (the default) it is left out of this representation. """ if self.repeat == 1: repeat = '' else: repeat = str(self.repeat) return f'{repeat}{self.format}{self.option}' class _AsciiColumnFormat(_BaseColumnFormat): """Similar to _ColumnFormat but specifically for columns in ASCII tables. The formats of ASCII table columns and binary table columns are inherently incompatible in FITS. They don't support the same ranges and types of values, and even reuse format codes in subtly different ways. 
For example the format code 'Iw' in ASCII columns refers to any integer whose string representation is at most w characters wide, so 'I' can represent effectively any integer that will fit in a FITS columns. Whereas for binary tables 'I' very explicitly refers to a 16-bit signed integer. Conversions between the two column formats can be performed using the ``to/from_binary`` methods on this class, or the ``to/from_ascii`` methods on the `_ColumnFormat` class. But again, not all conversions are possible and may result in a `ValueError`. """ def __new__(cls, format, strict=False): self = super().__new__(cls, format) self.format, self.width, self.precision = \ _parse_ascii_tformat(format, strict) # If no width has been specified, set the dtype here to default as well if format == self.format: self.recformat = ASCII2NUMPY[format] # This is to support handling logical (boolean) data from binary tables # in an ASCII table self._pseudo_logical = False return self @classmethod def from_column_format(cls, format): inst = cls.from_recformat(format.recformat) # Hack if format.format == 'L': inst._pseudo_logical = True return inst @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_ascii_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_ascii_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of Tw.d where T is the single character data type code, w is the width in characters for this field, and d is the number of digits after the decimal place (for format codes 'E', 'F', and 'D' only). """ if self.format in ('E', 'F', 'D'): return f'{self.format}{self.width}.{self.precision}' return f'{self.format}{self.width}' class _FormatX(str): """For X format in binary tables.""" def __new__(cls, repeat=1): nbytes = ((repeat - 1) // 8) + 1 # use an array, even if it is only ONE u1 (i.e. 
use tuple always) obj = super().__new__(cls, repr((nbytes,)) + 'u1') obj.repeat = repeat return obj def __getnewargs__(self): return (self.repeat,) @property def tform(self): return f'{self.repeat}X' # TODO: Table column formats need to be verified upon first reading the file; # as it is, an invalid P format will raise a VerifyError from some deep, # unexpected place class _FormatP(str): """For P format in variable length table.""" # As far as I can tell from my reading of the FITS standard, a type code is # *required* for P and Q formats; there is no default _format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])' r'(?:\((?P<max>\d*)\))?') _format_code = 'P' _format_re = re.compile(_format_re_template.format(_format_code)) _descriptor_format = '2i4' def __new__(cls, dtype, repeat=None, max=None): obj = super().__new__(cls, cls._descriptor_format) obj.format = NUMPY2FITS[dtype] obj.dtype = dtype obj.repeat = repeat obj.max = max return obj def __getnewargs__(self): return (self.dtype, self.repeat, self.max) @classmethod def from_tform(cls, format): m = cls._format_re.match(format) if not m or m.group('dtype') not in FITS2NUMPY: raise VerifyError(f'Invalid column format: {format}') repeat = m.group('repeat') array_dtype = m.group('dtype') max = m.group('max') if not max: max = None return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max) @property def tform(self): repeat = '' if self.repeat is None else self.repeat max = '' if self.max is None else self.max return f'{repeat}{self._format_code}{self.format}({max})' class _FormatQ(_FormatP): """Carries type description of the Q format for variable length arrays. The Q format is like the P format but uses 64-bit integers in the array descriptors, allowing for heaps stored beyond 2GB into a file. """ _format_code = 'Q' _format_re = re.compile(_FormatP._format_re_template.format(_format_code)) _descriptor_format = '2i8' class ColumnAttribute: """ Descriptor for attributes of `Column` that are associated with keywords in the FITS header and describe properties of the column as specified in the FITS standard. Each `ColumnAttribute` may have a ``validator`` method defined on it. This validates values set on this attribute to ensure that they meet the FITS standard. Invalid values will raise a warning and will not be used in formatting the column. The validator should take two arguments--the `Column` it is being assigned to, and the new value for the attribute, and it must raise an `AssertionError` if the value is invalid. The `ColumnAttribute` itself is a decorator that can be used to define the ``validator`` for each column attribute. For example:: @ColumnAttribute('TTYPE') def name(col, name): if not isinstance(name, str): raise AssertionError The actual object returned by this decorator is the `ColumnAttribute` instance though, not the ``name`` function. As such ``name`` is not a method of the class it is defined in. The setter for `ColumnAttribute` also updates the header of any table HDU this column is attached to in order to reflect the change. The ``validator`` should ensure that the value is valid for inclusion in a FITS header. """ def __init__(self, keyword): self._keyword = keyword self._validator = None # The name of the attribute associated with this keyword is currently # determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be # make more flexible in the future, for example, to support custom # column attributes. 
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword] def __get__(self, obj, objtype=None): if obj is None: return self else: return getattr(obj, self._attr) def __set__(self, obj, value): if self._validator is not None: self._validator(obj, value) old_value = getattr(obj, self._attr, None) setattr(obj, self._attr, value) obj._notify('column_attribute_changed', obj, self._attr[1:], old_value, value) def __call__(self, func): """ Set the validator for this column attribute. Returns ``self`` so that this can be used as a decorator, as described in the docs for this class. """ self._validator = func return self def __repr__(self): return f"{self.__class__.__name__}('{self._keyword}')" class Column(NotifierMixin): """ Class which contains the definition of one column, e.g. ``ttype``, ``tform``, etc. and the array containing values for the column. """ def __init__(self, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, array=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Construct a `Column` by specifying attributes. All attributes except ``format`` can be optional; see :ref:`astropy:column_creation` and :ref:`astropy:creating_ascii_table` for more information regarding ``TFORM`` keyword. Parameters ---------- name : str, optional column name, corresponding to ``TTYPE`` keyword format : str column format, corresponding to ``TFORM`` keyword unit : str, optional column unit, corresponding to ``TUNIT`` keyword null : str, optional null value, corresponding to ``TNULL`` keyword bscale : int-like, optional bscale value, corresponding to ``TSCAL`` keyword bzero : int-like, optional bzero value, corresponding to ``TZERO`` keyword disp : str, optional display format, corresponding to ``TDISP`` keyword start : int, optional column starting position (ASCII table only), corresponding to ``TBCOL`` keyword dim : str, optional column dimension corresponding to ``TDIM`` keyword array : iterable, optional a `list`, `numpy.ndarray` (or other iterable that can be used to initialize an ndarray) providing initial data for this column. The array will be automatically converted, if possible, to the data format of the column. In the case were non-trivial ``bscale`` and/or ``bzero`` arguments are given, the values in the array must be the *physical* values--that is, the values of column as if the scaling has already been applied (the array stored on the column object will then be converted back to its storage values). 
ascii : bool, optional set `True` if this describes a column for an ASCII table; this may be required to disambiguate the column format coord_type : str, optional coordinate/axis type corresponding to ``TCTYP`` keyword coord_unit : str, optional coordinate/axis unit corresponding to ``TCUNI`` keyword coord_ref_point : int-like, optional pixel coordinate of the reference point corresponding to ``TCRPX`` keyword coord_ref_value : int-like, optional coordinate value at reference point corresponding to ``TCRVL`` keyword coord_inc : int-like, optional coordinate increment at reference point corresponding to ``TCDLT`` keyword time_ref_pos : str, optional reference position for a time coordinate column corresponding to ``TRPOS`` keyword """ if format is None: raise ValueError('Must specify format to construct Column.') # any of the input argument (except array) can be a Card or just # a number/string kwargs = {'ascii': ascii} for attr in KEYWORD_ATTRIBUTES: value = locals()[attr] # get the argument's value if isinstance(value, Card): value = value.value kwargs[attr] = value valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs) if invalid_kwargs: msg = ['The following keyword arguments to Column were invalid:'] for val in invalid_kwargs.values(): msg.append(indent(val[1])) raise VerifyError('\n'.join(msg)) for attr in KEYWORD_ATTRIBUTES: setattr(self, attr, valid_kwargs.get(attr)) # TODO: Try to eliminate the following two special cases # for recformat and dim: # This is not actually stored as an attribute on columns for some # reason recformat = valid_kwargs['recformat'] # The 'dim' keyword's original value is stored in self.dim, while # *only* the tuple form is stored in self._dims. self._dims = self.dim self.dim = dim # Awful hack to use for now to keep track of whether the column holds # pseudo-unsigned int data self._pseudo_unsigned_ints = False # if the column data is not ndarray, make it to be one, i.e. # input arrays can be just list or tuple, not required to be ndarray # does not include Object array because there is no guarantee # the elements in the object array are consistent. if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)): try: # try to convert to a ndarray first if array is not None: array = np.array(array) except Exception: try: # then try to convert it to a strings array itemsize = int(recformat[1:]) array = chararray.array(array, itemsize=itemsize) except ValueError: # then try variable length array # Note: This includes _FormatQ by inheritance if isinstance(recformat, _FormatP): array = _VLF(array, dtype=recformat.dtype) else: raise ValueError('Data is inconsistent with the ' 'format `{}`.'.format(format)) array = self._convert_to_valid_data_type(array) # We have required (through documentation) that arrays passed in to # this constructor are already in their physical values, so we make # note of that here if isinstance(array, np.ndarray): self._physical_values = True else: self._physical_values = False self._parent_fits_rec = None self.array = array def __repr__(self): text = '' for attr in KEYWORD_ATTRIBUTES: value = getattr(self, attr) if value is not None: text += attr + ' = ' + repr(value) + '; ' return text[:-2] def __eq__(self, other): """ Two columns are equal if their name and format are the same. Other attributes aren't taken into account at this time. 
""" # According to the FITS standard column names must be case-insensitive a = (self.name.lower(), self.format) b = (other.name.lower(), other.format) return a == b def __hash__(self): """ Like __eq__, the hash of a column should be based on the unique column name and format, and be case-insensitive with respect to the column name. """ return hash((self.name.lower(), self.format)) @property def array(self): """ The Numpy `~numpy.ndarray` associated with this `Column`. If the column was instantiated with an array passed to the ``array`` argument, this will return that array. However, if the column is later added to a table, such as via `BinTableHDU.from_columns` as is typically the case, this attribute will be updated to reference the associated field in the table, which may no longer be the same array. """ # Ideally the .array attribute never would have existed in the first # place, or would have been internal-only. This is a legacy of the # older design from Astropy that needs to have continued support, for # now. # One of the main problems with this design was that it created a # reference cycle. When the .array attribute was updated after # creating a FITS_rec from the column (as explained in the docstring) a # reference cycle was created. This is because the code in BinTableHDU # (and a few other places) does essentially the following: # # data._coldefs = columns # The ColDefs object holding this Column # for col in columns: # col.array = data.field(col.name) # # This way each columns .array attribute now points to the field in the # table data. It's actually a pretty confusing interface (since it # replaces the array originally pointed to by .array), but it's the way # things have been for a long, long time. # # However, this results, in *many* cases, in a reference cycle. # Because the array returned by data.field(col.name), while sometimes # an array that owns its own data, is usually like a slice of the # original data. It has the original FITS_rec as the array .base. # This results in the following reference cycle (for the n-th column): # # data -> data._coldefs -> data._coldefs[n] -> # data._coldefs[n].array -> data._coldefs[n].array.base -> data # # Because ndarray objects do not handled by Python's garbage collector # the reference cycle cannot be broken. Therefore the FITS_rec's # refcount never goes to zero, its __del__ is never called, and its # memory is never freed. This didn't occur in *all* cases, but it did # occur in many cases. # # To get around this, Column.array is no longer a simple attribute # like it was previously. Now each Column has a ._parent_fits_rec # attribute which is a weakref to a FITS_rec object. Code that # previously assigned each col.array to field in a FITS_rec (as in # the example a few paragraphs above) is still used, however now # array.setter checks if a reference cycle will be created. And if # so, instead of saving directly to the Column's __dict__, it creates # the ._prent_fits_rec weakref, and all lookups of the column's .array # go through that instead. # # This alone does not fully solve the problem. Because # _parent_fits_rec is a weakref, if the user ever holds a reference to # the Column, but deletes all references to the underlying FITS_rec, # the .array attribute would suddenly start returning None instead of # the array data. This problem is resolved on FITS_rec's end. See the # note in the FITS_rec._coldefs property for the rest of the story. 
# If the Columns's array is not a reference to an existing FITS_rec, # then it is just stored in self.__dict__; otherwise check the # _parent_fits_rec reference if it 's still available. if 'array' in self.__dict__: return self.__dict__['array'] elif self._parent_fits_rec is not None: parent = self._parent_fits_rec() if parent is not None: return parent[self.name] else: return None @array.setter def array(self, array): # The following looks over the bases of the given array to check if it # has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs # contains this Column itself, and would create a reference cycle if we # stored the array directly in self.__dict__. # In this case it instead sets up the _parent_fits_rec weakref to the # underlying FITS_rec, so that array.getter can return arrays through # self._parent_fits_rec().field(self.name), rather than storing a # hard reference to the field like it used to. base = array while True: if (hasattr(base, '_coldefs') and isinstance(base._coldefs, ColDefs)): for col in base._coldefs: if col is self and self._parent_fits_rec is None: self._parent_fits_rec = weakref.ref(base) # Just in case the user already set .array to their own # array. if 'array' in self.__dict__: del self.__dict__['array'] return if getattr(base, 'base', None) is not None: base = base.base else: break self.__dict__['array'] = array @array.deleter def array(self): try: del self.__dict__['array'] except KeyError: pass self._parent_fits_rec = None @ColumnAttribute('TTYPE') def name(col, name): if name is None: # Allow None to indicate deleting the name, or to just indicate an # unspecified name (when creating a new Column). return # Check that the name meets the recommended standard--other column # names are *allowed*, but will be discouraged if isinstance(name, str) and not TTYPE_RE.match(name): warnings.warn( 'It is strongly recommended that column names contain only ' 'upper and lower-case ASCII letters, digits, or underscores ' 'for maximum compatibility with other software ' '(got {!r}).'.format(name), VerifyWarning) # This ensures that the new name can fit into a single FITS card # without any special extension like CONTINUE cards or the like. 
if (not isinstance(name, str) or len(str(Card('TTYPE', name))) != CARD_LENGTH): raise AssertionError( 'Column name must be a string able to fit in a single ' 'FITS card--typically this means a maximum of 68 ' 'characters, though it may be fewer if the string ' 'contains special characters like quotes.') @ColumnAttribute('TCTYP') def coord_type(col, coord_type): if coord_type is None: return if (not isinstance(coord_type, str) or len(coord_type) > 8): raise AssertionError( 'Coordinate/axis type must be a string of atmost 8 ' 'characters.') @ColumnAttribute('TCUNI') def coord_unit(col, coord_unit): if (coord_unit is not None and not isinstance(coord_unit, str)): raise AssertionError( 'Coordinate/axis unit must be a string.') @ColumnAttribute('TCRPX') def coord_ref_point(col, coord_ref_point): if (coord_ref_point is not None and not isinstance(coord_ref_point, numbers.Real)): raise AssertionError( 'Pixel coordinate of the reference point must be ' 'real floating type.') @ColumnAttribute('TCRVL') def coord_ref_value(col, coord_ref_value): if (coord_ref_value is not None and not isinstance(coord_ref_value, numbers.Real)): raise AssertionError( 'Coordinate value at reference point must be real ' 'floating type.') @ColumnAttribute('TCDLT') def coord_inc(col, coord_inc): if (coord_inc is not None and not isinstance(coord_inc, numbers.Real)): raise AssertionError( 'Coordinate increment must be real floating type.') @ColumnAttribute('TRPOS') def time_ref_pos(col, time_ref_pos): if (time_ref_pos is not None and not isinstance(time_ref_pos, str)): raise AssertionError( 'Time reference position must be a string.') format = ColumnAttribute('TFORM') unit = ColumnAttribute('TUNIT') null = ColumnAttribute('TNULL') bscale = ColumnAttribute('TSCAL') bzero = ColumnAttribute('TZERO') disp = ColumnAttribute('TDISP') start = ColumnAttribute('TBCOL') dim = ColumnAttribute('TDIM') @lazyproperty def ascii(self): """Whether this `Column` represents a column in an ASCII table.""" return isinstance(self.format, _AsciiColumnFormat) @lazyproperty def dtype(self): return self.format.dtype def copy(self): """ Return a copy of this `Column`. """ tmp = Column(format='I') # just use a throw-away format tmp.__dict__ = self.__dict__.copy() return tmp @staticmethod def _convert_format(format, cls): """The format argument to this class's initializer may come in many forms. This uses the given column format class ``cls`` to convert to a format of that type. TODO: There should be an abc base class for column format classes """ # Short circuit in case we're already a _BaseColumnFormat--there is at # least one case in which this can happen if isinstance(format, _BaseColumnFormat): return format, format.recformat if format in NUMPY2FITS: with suppress(VerifyError): # legit recarray format? recformat = format format = cls.from_recformat(format) try: # legit FITS format? format = cls(format) recformat = format.recformat except VerifyError: raise VerifyError(f'Illegal format `{format}`.') return format, recformat @classmethod def _verify_keywords(cls, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Given the keyword arguments used to initialize a Column, specifically those that typically read from a FITS header (so excluding array), verify that each keyword has a valid value. Returns a 2-tuple of dicts. The first maps valid keywords to their values. 
The second maps invalid keywords to a 2-tuple of their value, and a message explaining why they were found invalid. """ valid = {} invalid = {} try: format, recformat = cls._determine_formats(format, start, dim, ascii) valid.update(format=format, recformat=recformat) except (ValueError, VerifyError) as err: msg = ( f'Column format option (TFORMn) failed verification: {err!s} ' 'The invalid value will be ignored for the purpose of ' 'formatting the data in this column.') invalid['format'] = (format, msg) except AttributeError as err: msg = ( f'Column format option (TFORMn) must be a string with a valid ' f'FITS table format (got {format!s}: {err!s}). ' 'The invalid value will be ignored for the purpose of ' 'formatting the data in this column.') invalid['format'] = (format, msg) # Currently we don't have any validation for name, unit, bscale, or # bzero so include those by default # TODO: Add validation for these keywords, obviously for k, v in [('name', name), ('unit', unit), ('bscale', bscale), ('bzero', bzero)]: if v is not None and v != '': valid[k] = v # Validate null option # Note: Enough code exists that thinks empty strings are sensible # inputs for these options that we need to treat '' as None if null is not None and null != '': msg = None if isinstance(format, _AsciiColumnFormat): null = str(null) if len(null) > format.width: msg = ( "ASCII table null option (TNULLn) is longer than " "the column's character width and will be truncated " "(got {!r}).".format(null)) else: tnull_formats = ('B', 'I', 'J', 'K') if not _is_int(null): # Make this an exception instead of a warning, since any # non-int value is meaningless msg = ( 'Column null option (TNULLn) must be an integer for ' 'binary table columns (got {!r}). The invalid value ' 'will be ignored for the purpose of formatting ' 'the data in this column.'.format(null)) elif not (format.format in tnull_formats or (format.format in ('P', 'Q') and format.p_format in tnull_formats)): # TODO: We should also check that TNULLn's integer value # is in the range allowed by the column's format msg = ( 'Column null option (TNULLn) is invalid for binary ' 'table columns of type {!r} (got {!r}). The invalid ' 'value will be ignored for the purpose of formatting ' 'the data in this column.'.format(format, null)) if msg is None: valid['null'] = null else: invalid['null'] = (null, msg) # Validate the disp option # TODO: Add full parsing and validation of TDISPn keywords if disp is not None and disp != '': msg = None if not isinstance(disp, str): msg = ( f'Column disp option (TDISPn) must be a string (got ' f'{disp!r}). The invalid value will be ignored for the ' 'purpose of formatting the data in this column.') elif (isinstance(format, _AsciiColumnFormat) and disp[0].upper() == 'L'): # disp is at least one character long and has the 'L' format # which is not recognized for ASCII tables msg = ( "Column disp option (TDISPn) may not use the 'L' format " "with ASCII table columns. 
The invalid value will be " "ignored for the purpose of formatting the data in this " "column.") if msg is None: try: _parse_tdisp_format(disp) valid['disp'] = disp except VerifyError as err: msg = ( f'Column disp option (TDISPn) failed verification: ' f'{err!s} The invalid value will be ignored for the ' 'purpose of formatting the data in this column.') invalid['disp'] = (disp, msg) else: invalid['disp'] = (disp, msg) # Validate the start option if start is not None and start != '': msg = None if not isinstance(format, _AsciiColumnFormat): # The 'start' option only applies to ASCII columns msg = ( 'Column start option (TBCOLn) is not allowed for binary ' 'table columns (got {!r}). The invalid keyword will be ' 'ignored for the purpose of formatting the data in this ' 'column.'.format(start)) else: try: start = int(start) except (TypeError, ValueError): pass if not _is_int(start) or start < 1: msg = ( 'Column start option (TBCOLn) must be a positive integer ' '(got {!r}). The invalid value will be ignored for the ' 'purpose of formatting the data in this column.'.format(start)) if msg is None: valid['start'] = start else: invalid['start'] = (start, msg) # Process TDIMn options # ASCII table columns can't have a TDIMn keyword associated with it; # for now we just issue a warning and ignore it. # TODO: This should be checked by the FITS verification code if dim is not None and dim != '': msg = None dims_tuple = tuple() # NOTE: If valid, the dim keyword's value in the the valid dict is # a tuple, not the original string; if invalid just the original # string is returned if isinstance(format, _AsciiColumnFormat): msg = ( 'Column dim option (TDIMn) is not allowed for ASCII table ' 'columns (got {!r}). The invalid keyword will be ignored ' 'for the purpose of formatting this column.'.format(dim)) elif isinstance(dim, str): dims_tuple = _parse_tdim(dim) elif isinstance(dim, tuple): dims_tuple = dim else: msg = ( "`dim` argument must be a string containing a valid value " "for the TDIMn header keyword associated with this column, " "or a tuple containing the C-order dimensions for the " "column. The invalid value will be ignored for the purpose " "of formatting this column.") if dims_tuple: if reduce(operator.mul, dims_tuple) > format.repeat: msg = ( "The repeat count of the column format {!r} for column {!r} " "is fewer than the number of elements per the TDIM " "argument {!r}. The invalid TDIMn value will be ignored " "for the purpose of formatting this column.".format( name, format, dim)) if msg is None: valid['dim'] = dims_tuple else: invalid['dim'] = (dim, msg) if coord_type is not None and coord_type != '': msg = None if not isinstance(coord_type, str): msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "(got {!r}). The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_type)) elif len(coord_type) > 8: msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "of atmost 8 characters (got {!r}). The invalid keyword " "will be ignored for the purpose of formatting this " "column.".format(coord_type)) if msg is None: valid['coord_type'] = coord_type else: invalid['coord_type'] = (coord_type, msg) if coord_unit is not None and coord_unit != '': msg = None if not isinstance(coord_unit, str): msg = ( "Coordinate/axis unit option (TCUNIn) must be a string " "(got {!r}). 
The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_unit)) if msg is None: valid['coord_unit'] = coord_unit else: invalid['coord_unit'] = (coord_unit, msg) for k, v in [('coord_ref_point', coord_ref_point), ('coord_ref_value', coord_ref_value), ('coord_inc', coord_inc)]: if v is not None and v != '': msg = None if not isinstance(v, numbers.Real): msg = ( "Column {} option ({}n) must be a real floating type (got {!r}). " "The invalid value will be ignored for the purpose of formatting " "the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v)) if msg is None: valid[k] = v else: invalid[k] = (v, msg) if time_ref_pos is not None and time_ref_pos != '': msg = None if not isinstance(time_ref_pos, str): msg = ( "Time coordinate reference position option (TRPOSn) must be " "a string (got {!r}). The invalid keyword will be ignored for " "the purpose of formatting this column.".format(time_ref_pos)) if msg is None: valid['time_ref_pos'] = time_ref_pos else: invalid['time_ref_pos'] = (time_ref_pos, msg) return valid, invalid @classmethod def _determine_formats(cls, format, start, dim, ascii): """ Given a format string and whether or not the Column is for an ASCII table (ascii=None means unspecified, but lean toward binary table where ambiguous) create an appropriate _BaseColumnFormat instance for the column's format, and determine the appropriate recarray format. The values of the start and dim keyword arguments are also useful, as the former is only valid for ASCII tables and the latter only for BINARY tables. """ # If the given format string is unambiguously a Numpy dtype or one of # the Numpy record format type specifiers supported by Astropy then that # should take priority--otherwise assume it is a FITS format if isinstance(format, np.dtype): format, _, _ = _dtype_to_recformat(format) # check format if ascii is None and not isinstance(format, _BaseColumnFormat): # We're just give a string which could be either a Numpy format # code, or a format for a binary column array *or* a format for an # ASCII column array--there may be many ambiguities here. Try our # best to guess what the user intended. format, recformat = cls._guess_format(format, start, dim) elif not ascii and not isinstance(format, _BaseColumnFormat): format, recformat = cls._convert_format(format, _ColumnFormat) elif ascii and not isinstance(format, _AsciiColumnFormat): format, recformat = cls._convert_format(format, _AsciiColumnFormat) else: # The format is already acceptable and unambiguous recformat = format.recformat return format, recformat @classmethod def _guess_format(cls, format, start, dim): if start and dim: # This is impossible; this can't be a valid FITS column raise ValueError( 'Columns cannot have both a start (TCOLn) and dim ' '(TDIMn) option, since the former is only applies to ' 'ASCII tables, and the latter is only valid for binary ' 'tables.') elif start: # Only ASCII table columns can have a 'start' option guess_format = _AsciiColumnFormat elif dim: # Only binary tables can have a dim option guess_format = _ColumnFormat else: # If the format is *technically* a valid binary column format # (i.e. it has a valid format code followed by arbitrary # "optional" codes), but it is also strictly a valid ASCII # table format, then assume an ASCII table column was being # requested (the more likely case, after all). 
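            # Illustrative example (not exhaustive): a bare 'I4' parses as a
            # binary 'I' column with a trailing option code, but it is also a
            # strictly valid ASCII integer field of width 4; this is the kind
            # of ambiguity the code below has to resolve.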
with suppress(VerifyError): format = _AsciiColumnFormat(format, strict=True) # A safe guess which reflects the existing behavior of previous # Astropy versions guess_format = _ColumnFormat try: format, recformat = cls._convert_format(format, guess_format) except VerifyError: # For whatever reason our guess was wrong (for example if we got # just 'F' that's not a valid binary format, but it an ASCII format # code albeit with the width/precision omitted guess_format = (_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat) # If this fails too we're out of options--it is truly an invalid # format, or at least not supported format, recformat = cls._convert_format(format, guess_format) return format, recformat def _convert_to_valid_data_type(self, array): # Convert the format to a type we understand if isinstance(array, Delayed): return array elif array is None: return array else: format = self.format dims = self._dims if dims: shape = dims[:-1] if 'A' in format else dims shape = (len(array),) + shape array = array.reshape(shape) if 'P' in format or 'Q' in format: return array elif 'A' in format: if array.dtype.char in 'SU': if dims: # The 'last' dimension (first in the order given # in the TDIMn keyword itself) is the number of # characters in each string fsize = dims[-1] else: fsize = np.dtype(format.recformat).itemsize return chararray.array(array, itemsize=fsize, copy=False) else: return _convert_array(array, np.dtype(format.recformat)) elif 'L' in format: # boolean needs to be scaled back to storage values ('T', 'F') if array.dtype == np.dtype('bool'): return np.where(array == np.False_, ord('F'), ord('T')) else: return np.where(array == 0, ord('F'), ord('T')) elif 'X' in format: return _convert_array(array, np.dtype('uint8')) else: # Preserve byte order of the original array for now; see #77 numpy_format = array.dtype.byteorder + format.recformat # Handle arrays passed in as unsigned ints as pseudo-unsigned # int arrays; blatantly tacked in here for now--we need columns # to have explicit knowledge of whether they treated as # pseudo-unsigned bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31), 8: np.uint64(2**63)} if (array.dtype.kind == 'u' and array.dtype.itemsize in bzeros and self.bscale in (1, None, '') and self.bzero == bzeros[array.dtype.itemsize]): # Basically the array is uint, has scale == 1.0, and the # bzero is the appropriate value for a pseudo-unsigned # integer of the input dtype, then go ahead and assume that # uint is assumed numpy_format = numpy_format.replace('i', 'u') self._pseudo_unsigned_ints = True # The .base here means we're dropping the shape information, # which is only used to format recarray fields, and is not # useful for converting input arrays to the correct data type dtype = np.dtype(numpy_format).base return _convert_array(array, dtype) class ColDefs(NotifierMixin): """ Column definitions class. It has attributes corresponding to the `Column` attributes (e.g. `ColDefs` has the attribute ``names`` while `Column` has ``name``). Each attribute in `ColDefs` is a list of corresponding attribute values from all `Column` objects. 
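    For example (an illustrative sketch; the column names and formats below
    are arbitrary choices, not values required by this class)::

        from astropy.io import fits
        import numpy as np

        c1 = fits.Column(name='target', format='10A',
                         array=np.array(['NGC1', 'NGC2']))
        c2 = fits.Column(name='counts', format='J',
                         array=np.array([312, 334]))
        cols = fits.ColDefs([c1, c2])

        names = cols.names      # e.g. ['target', 'counts']
        formats = cols.formats  # the per-column TFORMn values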
""" _padding_byte = '\x00' _col_format_cls = _ColumnFormat def __new__(cls, input, ascii=False): klass = cls if (hasattr(input, '_columns_type') and issubclass(input._columns_type, ColDefs)): klass = input._columns_type elif (hasattr(input, '_col_format_cls') and issubclass(input._col_format_cls, _AsciiColumnFormat)): klass = _AsciiColDefs if ascii: # force ASCII if this has been explicitly requested klass = _AsciiColDefs return object.__new__(klass) def __getnewargs__(self): return (self._arrays,) def __init__(self, input, ascii=False): """ Parameters ---------- input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray` An existing table HDU, an existing `ColDefs`, or any multi-field Numpy array or `numpy.recarray`. ascii : bool Use True to ensure that ASCII table columns are used. """ from .hdu.table import _TableBaseHDU from .fitsrec import FITS_rec if isinstance(input, ColDefs): self._init_from_coldefs(input) elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and input._coldefs): # If given a FITS_rec object we can directly copy its columns, but # only if its columns have already been defined, otherwise this # will loop back in on itself and blow up self._init_from_coldefs(input._coldefs) elif isinstance(input, np.ndarray) and input.dtype.fields is not None: # Construct columns from the fields of a record array self._init_from_array(input) elif isiterable(input): # if the input is a list of Columns self._init_from_sequence(input) elif isinstance(input, _TableBaseHDU): # Construct columns from fields in an HDU header self._init_from_table(input) else: raise TypeError('Input to ColDefs must be a table HDU, a list ' 'of Columns, or a record/field array.') # Listen for changes on all columns for col in self.columns: col._add_listener(self) def _init_from_coldefs(self, coldefs): """Initialize from an existing ColDefs object (just copy the columns and convert their formats if necessary). """ self.columns = [self._copy_column(col) for col in coldefs] def _init_from_sequence(self, columns): for idx, col in enumerate(columns): if not isinstance(col, Column): raise TypeError(f'Element {idx} in the ColDefs input is not a Column.') self._init_from_coldefs(columns) def _init_from_array(self, array): self.columns = [] for idx in range(len(array.dtype)): cname = array.dtype.names[idx] ftype = array.dtype.fields[cname][0] format = self._col_format_cls.from_recformat(ftype) # Determine the appropriate dimensions for items in the column dim = array.dtype[idx].shape[::-1] if dim and (len(dim) > 0 or 'A' in format): if 'A' in format: # should take into account multidimensional items in the column dimel = int(re.findall('[0-9]+', str(ftype.subdtype[0]))[0]) # n x m string arrays must include the max string # length in their dimensions (e.g. l x n x m) dim = (dimel,) + dim dim = '(' + ','.join(str(d) for d in dim) + ')' else: dim = None # Check for unsigned ints. 
bzero = None if ftype.base.kind == 'u': if 'I' in format: bzero = np.uint16(2**15) elif 'J' in format: bzero = np.uint32(2**31) elif 'K' in format: bzero = np.uint64(2**63) c = Column(name=cname, format=format, array=array.view(np.ndarray)[cname], bzero=bzero, dim=dim) self.columns.append(c) def _init_from_table(self, table): hdr = table._header nfields = hdr['TFIELDS'] # go through header keywords to pick out column definition keywords # definition dictionaries for each field col_keywords = [{} for i in range(nfields)] for keyword in hdr: key = TDEF_RE.match(keyword) try: label = key.group('label') except Exception: continue # skip if there is no match if label in KEYWORD_NAMES: col = int(key.group('num')) if 0 < col <= nfields: attr = KEYWORD_TO_ATTRIBUTE[label] value = hdr[keyword] if attr == 'format': # Go ahead and convert the format value to the # appropriate ColumnFormat container now value = self._col_format_cls(value) col_keywords[col - 1][attr] = value # Verify the column keywords and display any warnings if necessary; # we only want to pass on the valid keywords for idx, kwargs in enumerate(col_keywords): valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs) for val in invalid_kwargs.values(): warnings.warn( f'Invalid keyword for column {idx + 1}: {val[1]}', VerifyWarning) # Special cases for recformat and dim # TODO: Try to eliminate the need for these special cases del valid_kwargs['recformat'] if 'dim' in valid_kwargs: valid_kwargs['dim'] = kwargs['dim'] col_keywords[idx] = valid_kwargs # data reading will be delayed for col in range(nfields): col_keywords[col]['array'] = Delayed(table, col) # now build the columns self.columns = [Column(**attrs) for attrs in col_keywords] # Add the table HDU is a listener to changes to the columns # (either changes to individual columns, or changes to the set of # columns (add/remove/etc.)) self._add_listener(table) def __copy__(self): return self.__class__(self) def __deepcopy__(self, memo): return self.__class__([copy.deepcopy(c, memo) for c in self.columns]) def _copy_column(self, column): """Utility function used currently only by _init_from_coldefs to help convert columns from binary format to ASCII format or vice versa if necessary (otherwise performs a straight copy). """ if isinstance(column.format, self._col_format_cls): # This column has a FITS format compatible with this column # definitions class (that is ascii or binary) return column.copy() new_column = column.copy() # Try to use the Numpy recformat as the equivalency between the # two formats; if that conversion can't be made then these # columns can't be transferred # TODO: Catch exceptions here and raise an explicit error about # column format conversion new_column.format = self._col_format_cls.from_column_format(column.format) # Handle a few special cases of column format options that are not # compatible between ASCII an binary tables # TODO: This is sort of hacked in right now; we really need # separate classes for ASCII and Binary table Columns, and they # should handle formatting issues like these if not isinstance(new_column.format, _AsciiColumnFormat): # the column is a binary table column... new_column.start = None if new_column.null is not None: # We can't just "guess" a value to represent null # values in the new column, so just disable this for # now; users may modify it later new_column.null = None else: # the column is an ASCII table column... 
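            # ASCII tables express TNULLn as a string rather than an integer,
            # so instead of trying to translate the binary-table null value,
            # fall back to the module-level default below.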
if new_column.null is not None: new_column.null = DEFAULT_ASCII_TNULL if (new_column.disp is not None and new_column.disp.upper().startswith('L')): # ASCII columns may not use the logical data display format; # for now just drop the TDISPn option for this column as we # don't have a systematic conversion of boolean data to ASCII # tables yet new_column.disp = None return new_column def __getattr__(self, name): """ Automatically returns the values for the given keyword attribute for all `Column`s in this list. Implements for example self.units, self.formats, etc. """ cname = name[:-1] if cname in KEYWORD_ATTRIBUTES and name[-1] == 's': attr = [] for col in self.columns: val = getattr(col, cname) attr.append(val if val is not None else '') return attr raise AttributeError(name) @lazyproperty def dtype(self): # Note: This previously returned a dtype that just used the raw field # widths based on the format's repeat count, and did not incorporate # field *shapes* as provided by TDIMn keywords. # Now this incorporates TDIMn from the start, which makes *this* method # a little more complicated, but simplifies code elsewhere (for example # fields will have the correct shapes even in the raw recarray). formats = [] offsets = [0] for format_, dim in zip(self.formats, self._dims): dt = format_.dtype if len(offsets) < len(self.formats): # Note: the size of the *original* format_ may be greater than # one would expect from the number of elements determined by # dim. The FITS format allows this--the rest of the field is # filled with undefined values. offsets.append(offsets[-1] + dt.itemsize) if dim: if format_.format == 'A': dt = np.dtype((dt.char + str(dim[-1]), dim[:-1])) else: dt = np.dtype((dt.base, dim)) formats.append(dt) return np.dtype({'names': self.names, 'formats': formats, 'offsets': offsets}) @lazyproperty def names(self): return [col.name for col in self.columns] @lazyproperty def formats(self): return [col.format for col in self.columns] @lazyproperty def _arrays(self): return [col.array for col in self.columns] @lazyproperty def _recformats(self): return [fmt.recformat for fmt in self.formats] @lazyproperty def _dims(self): """Returns the values of the TDIMn keywords parsed into tuples.""" return [col._dims for col in self.columns] def __getitem__(self, key): if isinstance(key, str): key = _get_index(self.names, key) x = self.columns[key] if _is_int(key): return x else: return ColDefs(x) def __len__(self): return len(self.columns) def __repr__(self): rep = 'ColDefs(' if hasattr(self, 'columns') and self.columns: # The hasattr check is mostly just useful in debugging sessions # where self.columns may not be defined yet rep += '\n ' rep += '\n '.join([repr(c) for c in self.columns]) rep += '\n' rep += ')' return rep def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b = list(other.columns) else: raise TypeError('Wrong type of input.') if option == 'left': tmp = list(self.columns) + b else: tmp = b + list(self.columns) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other, (list, tuple)): other = [other] _other = [_get_index(self.names, key) for key in other] indx = list(range(len(self))) for x in _other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def _update_column_attribute_changed(self, column, attr, old_value, new_value): """ Handle column attribute changed notifications from columns that are members of this `ColDefs`. 
`ColDefs` itself does not currently do anything with this, and just bubbles the notification up to any listening table HDUs that may need to update their headers, etc. However, this also informs the table of the numerical index of the column that changed. """ idx = 0 for idx, col in enumerate(self.columns): if col is column: break if attr == 'name': del self.names elif attr == 'format': del self.formats self._notify('column_attribute_changed', column, idx, attr, old_value, new_value) def add_col(self, column): """ Append one `Column` to the column definition. """ if not isinstance(column, Column): raise AssertionError # Ask the HDU object to load the data before we modify our columns self._notify('load_data') self._arrays.append(column.array) # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats self.columns.append(column) # Listen for changes on the new column column._add_listener(self) # If this ColDefs is being tracked by a Table, inform the # table that its data is now invalid. self._notify('column_added', self, column) return self def del_col(self, col_name): """ Delete (the definition of) one `Column`. col_name : str or int The column's name or index """ # Ask the HDU object to load the data before we modify our columns self._notify('load_data') indx = _get_index(self.names, col_name) col = self.columns[indx] del self._arrays[indx] # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats del self.columns[indx] col._remove_listener(self) # If this ColDefs is being tracked by a table HDU, inform the HDU (or # any other listeners) that the column has been removed # Just send a reference to self, and the index of the column that was # removed self._notify('column_removed', self, indx) return self def change_attrib(self, col_name, attrib, new_value): """ Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`. Parameters ---------- col_name : str or int The column name or index to change attrib : str The attribute name new_value : object The new value for the attribute """ setattr(self[col_name], attrib, new_value) def change_name(self, col_name, new_name): """ Change a `Column`'s name. Parameters ---------- col_name : str The current name of the column new_name : str The new name of the column """ if new_name != col_name and new_name in self.names: raise ValueError(f'New name {new_name} already exists.') else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): """ Change a `Column`'s unit. Parameters ---------- col_name : str or int The column name or index new_unit : str The new unit for the column """ self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all', output=None): """ Get attribute(s) information of the column definition. Parameters ---------- attrib : str Can be one or more of the attributes listed in ``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is ``"all"`` which will print out all attributes. It forgives plurals and blanks. If there are two or more attribute names, they must be separated by comma(s). output : file-like, optional File-like object to output to. Outputs to stdout by default. If `False`, returns the attributes as a `dict` instead. Notes ----- This function doesn't return anything by default; it just prints to stdout. 
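        A brief illustration (assuming ``cols`` is an existing `ColDefs`
        instance)::

            cols.info('names')               # print just the column names
            cols.info('names, formats')      # print names and formats
            attrs = cols.info(output=False)  # return the values as a dict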
""" if output is None: output = sys.stdout if attrib.strip().lower() in ['all', '']: lst = KEYWORD_ATTRIBUTES else: lst = attrib.split(',') for idx in range(len(lst)): lst[idx] = lst[idx].strip().lower() if lst[idx][-1] == 's': lst[idx] = list[idx][:-1] ret = {} for attr in lst: if output: if attr not in KEYWORD_ATTRIBUTES: output.write("'{}' is not an attribute of the column " "definitions.\n".format(attr)) continue output.write(f"{attr}:\n") output.write(f" {getattr(self, attr + 's')}\n") else: ret[attr] = getattr(self, attr + 's') if not output: return ret class _AsciiColDefs(ColDefs): """ColDefs implementation for ASCII tables.""" _padding_byte = ' ' _col_format_cls = _AsciiColumnFormat def __init__(self, input, ascii=True): super().__init__(input) # if the format of an ASCII column has no width, add one if not isinstance(input, _AsciiColDefs): self._update_field_metrics() else: for idx, s in enumerate(input.starts): self.columns[idx].start = s self._spans = input.spans self._width = input._width @lazyproperty def dtype(self): dtype = {} for j in range(len(self)): data_type = 'S' + str(self.spans[j]) dtype[self.names[j]] = (data_type, self.starts[j] - 1) return np.dtype(dtype) @property def spans(self): """A list of the widths of each field in the table.""" return self._spans @lazyproperty def _recformats(self): if len(self) == 1: widths = [] else: widths = [y - x for x, y in pairwise(self.starts)] # Widths is the width of each field *including* any space between # fields; this is so that we can map the fields to string records in a # Numpy recarray widths.append(self._width - self.starts[-1] + 1) return ['a' + str(w) for w in widths] def add_col(self, column): super().add_col(column) self._update_field_metrics() def del_col(self, col_name): super().del_col(col_name) self._update_field_metrics() def _update_field_metrics(self): """ Updates the list of the start columns, the list of the widths of each field, and the total width of each record in the table. """ spans = [0] * len(self.columns) end_col = 0 # Refers to the ASCII text column, not the table col for idx, col in enumerate(self.columns): width = col.format.width # Update the start columns and column span widths taking into # account the case that the starting column of a field may not # be the column immediately after the previous field if not col.start: col.start = end_col + 1 end_col = col.start + width - 1 spans[idx] = width self._spans = spans self._width = end_col # Utilities class _VLF(np.ndarray): """Variable length field object.""" def __new__(cls, input, dtype='a'): """ Parameters ---------- input a sequence of variable-sized elements. """ if dtype == 'a': try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! input = [chararray.array(x, itemsize=1) for x in input] except Exception: raise ValueError( f'Inconsistent input data array: {input}') a = np.array(input, dtype=object) self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object) self.max = 0 self.element_dtype = dtype return self def __array_finalize__(self, obj): if obj is None: return self.max = obj.max self.element_dtype = obj.element_dtype def __setitem__(self, key, value): """ To make sure the new item has consistent data type to avoid misalignment. 
""" if isinstance(value, np.ndarray) and value.dtype == self.dtype: pass elif isinstance(value, chararray.chararray) and value.itemsize == 1: pass elif self.element_dtype == 'a': value = chararray.array(value, itemsize=1) else: value = np.array(value, dtype=self.element_dtype) np.ndarray.__setitem__(self, key, value) self.max = max(self.max, len(value)) def tolist(self): return [list(item) for item in super().tolist()] def _get_index(names, key): """ Get the index of the ``key`` in the ``names`` list. The ``key`` can be an integer or string. If integer, it is the index in the list. If string, a. Field (column) names are case sensitive: you can have two different columns called 'abc' and 'ABC' respectively. b. When you *refer* to a field (presumably with the field method), it will try to match the exact name first, so in the example in (a), field('abc') will get the first field, and field('ABC') will get the second field. If there is no exact name matched, it will try to match the name with case insensitivity. So, in the last example, field('Abc') will cause an exception since there is no unique mapping. If there is a field named "XYZ" and no other field name is a case variant of "XYZ", then field('xyz'), field('Xyz'), etc. will get this field. """ if _is_int(key): indx = int(key) elif isinstance(key, str): # try to find exact match first try: indx = names.index(key.rstrip()) except ValueError: # try to match case-insentively, _key = key.lower().rstrip() names = [n.lower().rstrip() for n in names] count = names.count(_key) # occurrence of _key in names if count == 1: indx = names.index(_key) elif count == 0: raise KeyError(f"Key '{key}' does not exist.") else: # multiple match raise KeyError(f"Ambiguous key name '{key}'.") else: raise KeyError(f"Illegal key '{key!r}'.") return indx def _unwrapx(input, output, repeat): """ Unwrap the X format column into a Boolean array. Parameters ---------- input input ``Uint8`` array of shape (`s`, `nbytes`) output output Boolean array of shape (`s`, `repeat`) repeat number of bits """ pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8') nbytes = ((repeat - 1) // 8) + 1 for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8]) def _wrapx(input, output, repeat): """ Wrap the X format column Boolean array into an ``UInt8`` array. Parameters ---------- input input Boolean array of shape (`s`, `repeat`) output output ``Uint8`` array of shape (`s`, `nbytes`) repeat number of bits """ output[...] = 0 # reset the output nbytes = ((repeat - 1) // 8) + 1 unused = nbytes * 8 - repeat for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): if j != _min: np.left_shift(output[..., i], 1, output[..., i]) np.add(output[..., i], input[..., j], output[..., i]) # shift the unused bits np.left_shift(output[..., i], unused, output[..., i]) def _makep(array, descr_output, format, nrows=None): """ Construct the P (or Q) format column array, both the data descriptors and the data. It returns the output "data" array of data type `dtype`. The descriptor location will have a zero offset for all columns after this call. The final offset will be calculated when the file is written. 
Parameters ---------- array input object array descr_output output "descriptor" array of data type int32 (for P format arrays) or int64 (for Q format arrays)--must be nrows long in its first dimension format the _FormatP object representing the format of the variable array nrows : int, optional number of rows to create in the column; defaults to the number of rows in the input array """ # TODO: A great deal of this is redundant with FITS_rec._convert_p; see if # we can merge the two somehow. _offset = 0 if not nrows: nrows = len(array) data_output = _VLF([None] * nrows, dtype=format.dtype) if format.dtype == 'a': _nbytes = 1 else: _nbytes = np.array([], dtype=format.dtype).itemsize for idx in range(nrows): if idx < len(array): rowval = array[idx] else: if format.dtype == 'a': rowval = ' ' * data_output.max else: rowval = [0] * data_output.max if format.dtype == 'a': data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1) else: data_output[idx] = np.array(rowval, dtype=format.dtype) descr_output[idx, 0] = len(data_output[idx]) descr_output[idx, 1] = _offset _offset += len(data_output[idx]) * _nbytes return data_output def _parse_tformat(tform): """Parse ``TFORMn`` keyword for a binary table into a ``(repeat, format, option)`` tuple. """ try: (repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups() except Exception: # TODO: Maybe catch this error use a default type (bytes, maybe?) for # unrecognized column types. As long as we can determine the correct # byte width somehow.. raise VerifyError(f'Format {tform!r} is not recognized.') if repeat == '': repeat = 1 else: repeat = int(repeat) return (repeat, format.upper(), option) def _parse_ascii_tformat(tform, strict=False): """ Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width, precision)`` tuple (the latter is always zero unless format is one of 'E', 'F', or 'D'). 
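    For example (illustrative values): ``'A10'`` parses to ``('A', 10, 0)``
    and ``'F8.3'`` to ``('F', 8, 3)``; a bare code such as ``'I'`` falls back
    to the default width for that format unless ``strict=True``, in which
    case it is rejected.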
""" match = TFORMAT_ASCII_RE.match(tform.strip()) if not match: raise VerifyError(f'Format {tform!r} is not recognized.') # Be flexible on case format = match.group('format') if format is None: # Floating point format format = match.group('formatf').upper() width = match.group('widthf') precision = match.group('precision') if width is None or precision is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: width = 0 if width is None else width precision = 1 if precision is None else precision else: format = format.upper() width = match.group('width') if width is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: # Just use a default width of 0 if unspecified width = 0 precision = 0 def convert_int(val): msg = ('Format {!r} is not valid--field width and decimal precision ' 'must be integers.') try: val = int(val) except (ValueError, TypeError): raise VerifyError(msg.format(tform)) return val if width and precision: # This should only be the case for floating-point formats width, precision = convert_int(width), convert_int(precision) elif width: # Just for integer/string formats; ignore precision width = convert_int(width) else: # For any format, if width was unspecified use the set defaults width, precision = ASCII_DEFAULT_WIDTHS[format] if width <= 0: raise VerifyError("Format {!r} not valid--field width must be a " "positive integeter.".format(tform)) if precision >= width: raise VerifyError("Format {!r} not valid--the number of decimal digits " "must be less than the format's total " "width {}.".format(tform, width)) return format, width, precision def _parse_tdim(tdim): """Parse the ``TDIM`` value into a tuple (may return an empty tuple if the value ``TDIM`` value is empty or invalid). """ m = tdim and TDIM_RE.match(tdim) if m: dims = m.group('dims') return tuple(int(d.strip()) for d in dims.split(','))[::-1] # Ignore any dim values that don't specify a multidimensional column return tuple() def _scalar_to_format(value): """ Given a scalar value or string, returns the minimum FITS column format that can represent that value. 'minimum' is defined by the order given in FORMATORDER. """ # First, if value is a string, try to convert to the appropriate scalar # value for type_ in (int, float, complex): try: value = type_(value) break except ValueError: continue numpy_dtype_str = np.min_scalar_type(value).str numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness try: fits_format = NUMPY2FITS[numpy_dtype_str] return FITSUPCONVERTERS.get(fits_format, fits_format) except KeyError: return "A" + str(len(value)) def _cmp_recformats(f1, f2): """ Compares two numpy recformats using the ordering given by FORMATORDER. """ if f1[0] == 'a' and f2[0] == 'a': return cmp(int(f1[1:]), int(f2[1:])) else: f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2] return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2)) def _convert_fits2record(format): """ Convert FITS format spec to record format spec. """ repeat, dtype, option = _parse_tformat(format) if dtype in FITS2NUMPY: if dtype == 'A': output_format = FITS2NUMPY[dtype] + str(repeat) # to accommodate both the ASCII table and binary table column # format spec, i.e. A7 in ASCII table is the same as 7A in # binary table, so both will produce 'a7'. 
# Technically the FITS standard does not allow this but it's a very # common mistake if format.lstrip()[0] == 'A' and option != '': # make sure option is integer output_format = FITS2NUMPY[dtype] + str(int(option)) else: repeat_str = '' if repeat != 1: repeat_str = str(repeat) output_format = repeat_str + FITS2NUMPY[dtype] elif dtype == 'X': output_format = _FormatX(repeat) elif dtype == 'P': output_format = _FormatP.from_tform(format) elif dtype == 'Q': output_format = _FormatQ.from_tform(format) elif dtype == 'F': output_format = 'f8' else: raise ValueError(f'Illegal format `{format}`.') return output_format def _convert_record2fits(format): """ Convert record format spec to FITS format spec. """ recformat, kind, dtype = _dtype_to_recformat(format) shape = dtype.shape itemsize = dtype.base.itemsize if dtype.char == 'U' or (dtype.subdtype is not None and dtype.subdtype[0].char == 'U'): # Unicode dtype--itemsize is 4 times actual ASCII character length, # which what matters for FITS column formats # Use dtype.base and dtype.subdtype --dtype for multi-dimensional items itemsize = itemsize // 4 option = str(itemsize) ndims = len(shape) repeat = 1 if ndims > 0: nel = np.array(shape, dtype='i8').prod() if nel > 1: repeat = nel if kind == 'a': # This is a kludge that will place string arrays into a # single field, so at least we won't lose data. Need to # use a TDIM keyword to fix this, declaring as (slength, # dim1, dim2, ...) as mwrfits does ntot = int(repeat) * int(option) output_format = str(ntot) + 'A' elif recformat in NUMPY2FITS: # record format if repeat != 1: repeat = str(repeat) else: repeat = '' output_format = repeat + NUMPY2FITS[recformat] else: raise ValueError(f'Illegal format `{format}`.') return output_format def _dtype_to_recformat(dtype): """ Utility function for converting a dtype object or string that instantiates a dtype (e.g. 'float32') into one of the two character Numpy format codes that have been traditionally used by Astropy. In particular, use of 'a' to refer to character data is long since deprecated in Numpy, but Astropy remains heavily invested in its use (something to try to get away from sooner rather than later). """ if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) kind = dtype.base.kind if kind in ('U', 'S'): recformat = kind = 'a' else: itemsize = dtype.base.itemsize recformat = kind + str(itemsize) return recformat, kind, dtype def _convert_format(format, reverse=False): """ Convert FITS format spec to record format spec. Do the opposite if reverse=True. 
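    For example (illustrative): ``_convert_format('J')`` returns the record
    format ``'i4'``, while ``_convert_format('i4', reverse=True)`` returns
    the FITS format ``'J'``.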
""" if reverse: return _convert_record2fits(format) else: return _convert_fits2record(format) def _convert_ascii_format(format, reverse=False): """Convert ASCII table format spec to record format spec.""" if reverse: recformat, kind, dtype = _dtype_to_recformat(format) itemsize = dtype.itemsize if kind == 'a': return 'A' + str(itemsize) elif NUMPY2FITS.get(recformat) == 'L': # Special case for logical/boolean types--for ASCII tables we # represent these as single character columns containing 'T' or 'F' # (a la the storage format for Logical columns in binary tables) return 'A1' elif kind == 'i': # Use for the width the maximum required to represent integers # of that byte size plus 1 for signs, but use a minimum of the # default width (to keep with existing behavior) width = 1 + len(str(2 ** (itemsize * 8))) width = max(width, ASCII_DEFAULT_WIDTHS['I'][0]) return 'I' + str(width) elif kind == 'f': # This is tricky, but go ahead and use D if float-64, and E # if float-32 with their default widths if itemsize >= 8: format = 'D' else: format = 'E' width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format]) return format + width # TODO: There may be reasonable ways to represent other Numpy types so # let's see what other possibilities there are besides just 'a', 'i', # and 'f'. If it doesn't have a reasonable ASCII representation then # raise an exception else: format, width, precision = _parse_ascii_tformat(format) # This gives a sensible "default" dtype for a given ASCII # format code recformat = ASCII2NUMPY[format] # The following logic is taken from CFITSIO: # For integers, if the width <= 4 we can safely use 16-bit ints for all # values, if width >= 10 we may need to accommodate 64-bit ints. # values [for the non-standard J format code just always force 64-bit] if format == 'I': if width <= 4: recformat = 'i2' elif width > 9: recformat = 'i8' elif format == 'A': recformat += str(width) return recformat def _parse_tdisp_format(tdisp): """ Parse the ``TDISPn`` keywords for ASCII and binary tables into a ``(format, width, precision, exponential)`` tuple (the TDISP values for ASCII and binary are identical except for 'Lw', which is only present in BINTABLE extensions Parameters ---------- tdisp : str TDISPn FITS Header keyword. Used to specify display formatting. 
Returns ------- formatc: str The format characters from TDISPn width: str The width int value from TDISPn precision: str The precision int value from TDISPn exponential: str The exponential int value from TDISPn """ # Use appropriate regex for format type tdisp = tdisp.strip() fmt_key = tdisp[0] if tdisp[0] != 'E' or ( len(tdisp) > 1 and tdisp[1] not in 'NS') else tdisp[:2] try: tdisp_re = TDISP_RE_DICT[fmt_key] except KeyError: raise VerifyError(f'Format {tdisp} is not recognized.') match = tdisp_re.match(tdisp.strip()) if not match or match.group('formatc') is None: raise VerifyError(f'Format {tdisp} is not recognized.') formatc = match.group('formatc') width = match.group('width') precision = None exponential = None # Some formats have precision and exponential if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'): precision = match.group('precision') if precision is None: precision = 1 if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'): exponential = match.group('exponential') if exponential is None: exponential = 1 # Once parsed, check format dict to do conversion to a formatting string return formatc, width, precision, exponential def _fortran_to_python_format(tdisp): """ Turn the TDISPn fortran format pieces into a final Python format string. See the format_type definitions above the TDISP_FMT_DICT. If codes is changed to take advantage of the exponential specification, will need to add it as another input parameter. Parameters ---------- tdisp : str TDISPn FITS Header keyword. Used to specify display formatting. Returns ------- format_string: str The TDISPn keyword string translated into a Python format string. """ format_type, width, precision, exponential = _parse_tdisp_format(tdisp) try: fmt = TDISP_FMT_DICT[format_type] return fmt.format(width=width, precision=precision) except KeyError: raise VerifyError(f'Format {format_type} is not recognized.') def python_to_tdisp(format_string, logical_dtype=False): """ Turn the Python format string to a TDISP FITS compliant format string. Not all formats convert. these will cause a Warning and return None. Parameters ---------- format_string : str TDISPn FITS Header keyword. Used to specify display formatting. logical_dtype : bool True is this format type should be a logical type, 'L'. Needs special handling. Returns ------- tdsip_string: str The TDISPn keyword string translated into a Python format string. """ fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z', 'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E', 'E': 'E'} if format_string in [None, "", "{}"]: return None # Strip out extra format characters that aren't a type or a width/precision if format_string[0] == '{' and format_string != "{}": fmt_str = format_string.lstrip("{:").rstrip('}') elif format_string[0] == '%': fmt_str = format_string.lstrip("%") else: fmt_str = format_string precision, sep = '', '' # Character format, only translate right aligned, and don't take zero fills if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0': ftype = fmt_to_tdisp['a'] width = fmt_str[1:] elif fmt_str[-1] == 's' and fmt_str != 's': ftype = fmt_to_tdisp['a'] width = fmt_str[:-1].lstrip('0') # Number formats, don't take zero fills elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0': ftype = fmt_to_tdisp[fmt_str[-1]] fmt_str = fmt_str[:-1] # If format has a "." split out the width and precision if '.' in fmt_str: width, precision = fmt_str.split('.') sep = '.' 
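            # A precision-only spec such as '{:.3f}' leaves the width empty;
            # the branch below synthesizes a width from the requested
            # precision plus the difference between the default ASCII width
            # and precision for this format type.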
if width == "": ascii_key = ftype if ftype != 'G' else 'F' width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] - ASCII_DEFAULT_WIDTHS[ascii_key][1])) # Otherwise we just have a width else: width = fmt_str else: warnings.warn('Format {} cannot be mapped to the accepted ' 'TDISPn keyword values. Format will not be ' 'moved into TDISPn keyword.'.format(format_string), AstropyUserWarning) return None # Catch logical data type, set the format type back to L in this case if logical_dtype: ftype = 'L' return ftype + width + sep + precision
1045c57a26df72578515253e89e81c75e79aca1744880675948c25f8f303b28d
# Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import operator import warnings import weakref from contextlib import suppress from functools import reduce import numpy as np from numpy import char as chararray from .column import (ASCIITNULL, FITS2NUMPY, ASCII2NUMPY, ASCII2STR, ColDefs, _AsciiColDefs, _FormatX, _FormatP, _VLF, _get_index, _wrapx, _unwrapx, _makep, Delayed) from .util import decode_ascii, encode_ascii, _rstrip_inplace from astropy.utils import lazyproperty class FITS_record: """ FITS record class. `FITS_record` is used to access records of the `FITS_rec` object. This will allow us to deal with scaled columns. It also handles conversion/scaling of columns in ASCII tables. The `FITS_record` class expects a `FITS_rec` object as input. """ def __init__(self, input, row=0, start=None, end=None, step=None, base=None, **kwargs): """ Parameters ---------- input : array The array to wrap. row : int, optional The starting logical row of the array. start : int, optional The starting column in the row associated with this object. Used for subsetting the columns of the `FITS_rec` object. end : int, optional The ending column in the row associated with this object. Used for subsetting the columns of the `FITS_rec` object. """ self.array = input self.row = row if base: width = len(base) else: width = self.array._nfields s = slice(start, end, step).indices(width) self.start, self.end, self.step = s self.base = base def __getitem__(self, key): if isinstance(key, str): indx = _get_index(self.array.names, key) if indx < self.start or indx > self.end - 1: raise KeyError(f"Key '{key}' does not exist.") elif isinstance(key, slice): return type(self)(self.array, self.row, key.start, key.stop, key.step, self) else: indx = self._get_index(key) if indx > self.array._nfields - 1: raise IndexError('Index out of bounds') return self.array.field(indx)[self.row] def __setitem__(self, key, value): if isinstance(key, str): indx = _get_index(self.array.names, key) if indx < self.start or indx > self.end - 1: raise KeyError(f"Key '{key}' does not exist.") elif isinstance(key, slice): for indx in range(slice.start, slice.stop, slice.step): indx = self._get_indx(indx) self.array.field(indx)[self.row] = value else: indx = self._get_index(key) if indx > self.array._nfields - 1: raise IndexError('Index out of bounds') self.array.field(indx)[self.row] = value def __len__(self): return len(range(self.start, self.end, self.step)) def __repr__(self): """ Display a single row. """ outlist = [] for idx in range(len(self)): outlist.append(repr(self[idx])) return f"({', '.join(outlist)})" def field(self, field): """ Get the field data of the record. """ return self.__getitem__(field) def setfield(self, field, value): """ Set the field data of the record. """ self.__setitem__(field, value) @lazyproperty def _bases(self): bases = [weakref.proxy(self)] base = self.base while base: bases.append(base) base = base.base return bases def _get_index(self, indx): indices = np.ogrid[:self.array._nfields] for base in reversed(self._bases): if base.step < 1: s = slice(base.start, None, base.step) else: s = slice(base.start, base.end, base.step) indices = indices[s] return indices[indx] class FITS_rec(np.recarray): """ FITS record array class. `FITS_rec` is the data part of a table HDU's data part. This is a layer over the `~numpy.recarray`, so we can deal with scaled columns. It inherits all of the standard methods from `numpy.ndarray`. 
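    A typical way to build one directly (an illustrative sketch; the column
    definitions are arbitrary)::

        from astropy.io import fits
        import numpy as np

        c1 = fits.Column(name='time', format='D',
                         array=np.array([1.0, 2.0]))
        c2 = fits.Column(name='flag', format='L',
                         array=np.array([True, False]))
        data = fits.FITS_rec.from_columns([c1, c2])

        col = data['time']   # scaled view of the 'time' column
        row = data[0]        # a FITS_record for the first row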
""" _record_type = FITS_record _character_as_bytes = False def __new__(subtype, input): """ Construct a FITS record array from a recarray. """ # input should be a record array if input.dtype.subdtype is None: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data) else: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data, strides=input.strides) self._init() if self.dtype.fields: self._nfields = len(self.dtype.fields) return self def __setstate__(self, state): meta = state[-1] column_state = state[-2] state = state[:-2] super().__setstate__(state) self._col_weakrefs = weakref.WeakSet() for attr, value in zip(meta, column_state): setattr(self, attr, value) def __reduce__(self): """ Return a 3-tuple for pickling a FITS_rec. Use the super-class functionality but then add in a tuple of FITS_rec-specific values that get used in __setstate__. """ reconst_func, reconst_func_args, state = super().__reduce__() # Define FITS_rec-specific attrs that get added to state column_state = [] meta = [] for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields', '_gap', '_uint', 'parnames', '_coldefs']: with suppress(AttributeError): # _coldefs can be Delayed, and file objects cannot be # picked, it needs to be deepcopied first if attrs == '_coldefs': column_state.append(self._coldefs.__deepcopy__(None)) else: column_state.append(getattr(self, attrs)) meta.append(attrs) state = state + (column_state, meta) return reconst_func, reconst_func_args, state def __array_finalize__(self, obj): if obj is None: return if isinstance(obj, FITS_rec): self._character_as_bytes = obj._character_as_bytes if isinstance(obj, FITS_rec) and obj.dtype == self.dtype: self._converted = obj._converted self._heapoffset = obj._heapoffset self._heapsize = obj._heapsize self._col_weakrefs = obj._col_weakrefs self._coldefs = obj._coldefs self._nfields = obj._nfields self._gap = obj._gap self._uint = obj._uint elif self.dtype.fields is not None: # This will allow regular ndarrays with fields, rather than # just other FITS_rec objects self._nfields = len(self.dtype.fields) self._converted = {} self._heapoffset = getattr(obj, '_heapoffset', 0) self._heapsize = getattr(obj, '_heapsize', 0) self._gap = getattr(obj, '_gap', 0) self._uint = getattr(obj, '_uint', False) self._col_weakrefs = weakref.WeakSet() self._coldefs = ColDefs(self) # Work around chicken-egg problem. Column.array relies on the # _coldefs attribute to set up ref back to parent FITS_rec; however # in the above line the self._coldefs has not been assigned yet so # this fails. This patches that up... for col in self._coldefs: del col.array col._parent_fits_rec = weakref.ref(self) else: self._init() def _init(self): """Initializes internal attributes specific to FITS-isms.""" self._nfields = 0 self._converted = {} self._heapoffset = 0 self._heapsize = 0 self._col_weakrefs = weakref.WeakSet() self._coldefs = None self._gap = 0 self._uint = False @classmethod def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False): """ Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec` object. .. note:: This was originally part of the ``new_table`` function in the table module but was moved into a class method since most of its functionality always had more to do with initializing a `FITS_rec` object than anything else, and much of it also overlapped with ``FITS_rec._scale_back``. Parameters ---------- columns : sequence of `Column` or a `ColDefs` The columns from which to create the table data. 
If these columns have data arrays attached that data may be used in initializing the new table. Otherwise the input columns will be used as a template for a new table with the requested number of rows. nrows : int Number of rows in the new table. If the input columns have data associated with them, the size of the largest input column is used. Otherwise the default is 0. fill : bool If `True`, will fill all cells with zeros or blanks. If `False`, copy the data from input, undefined cells will still be filled with zeros/blanks. """ if not isinstance(columns, ColDefs): columns = ColDefs(columns) # read the delayed data for column in columns: arr = column.array if isinstance(arr, Delayed): if arr.hdu.data is None: column.array = None else: column.array = _get_recarray_field(arr.hdu.data, arr.field) # Reset columns._arrays (which we may want to just do away with # altogether del columns._arrays # use the largest column shape as the shape of the record if nrows == 0: for arr in columns._arrays: if arr is not None: dim = arr.shape[0] else: dim = 0 if dim > nrows: nrows = dim raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8) raw_data.fill(ord(columns._padding_byte)) data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls) data._character_as_bytes = character_as_bytes # Previously this assignment was made from hdu.columns, but that's a # bug since if a _TableBaseHDU has a FITS_rec in its .data attribute # the _TableBaseHDU.columns property is actually returned from # .data._coldefs, so this assignment was circular! Don't make that # mistake again. # All of this is an artifact of the fragility of the FITS_rec class, # and that it can't just be initialized by columns... data._coldefs = columns # If fill is True we don't copy anything from the column arrays. We're # just using them as a template, and returning a table filled with # zeros/blanks if fill: return data # Otherwise we have to fill the recarray with data from the input # columns for idx, column in enumerate(columns): # For each column in the ColDef object, determine the number of # rows in that column. This will be either the number of rows in # the ndarray associated with the column, or the number of rows # given in the call to this function, which ever is smaller. If # the input FILL argument is true, the number of rows is set to # zero so that no data is copied from the original input data. arr = column.array if arr is None: array_size = 0 else: array_size = len(arr) n = min(array_size, nrows) # TODO: At least *some* of this logic is mostly redundant with the # _convert_foo methods in this class; see if we can eliminate some # of that duplication. if not n: # The input column had an empty array, so just use the fill # value continue field = _get_recarray_field(data, idx) name = column.name fitsformat = column.format recformat = fitsformat.recformat outarr = field[:n] inarr = arr[:n] if isinstance(recformat, _FormatX): # Data is a bit array if inarr.shape[-1] == recformat.repeat: _wrapx(inarr, outarr, recformat.repeat) continue elif isinstance(recformat, _FormatP): data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows)) continue # TODO: Find a better way of determining that the column is meant # to be FITS L formatted elif recformat[-2:] == FITS2NUMPY['L'] and inarr.dtype == bool: # column is boolean # The raw data field should be filled with either 'T' or 'F' # (not 0). 
Use 'F' as a default field[:] = ord('F') # Also save the original boolean array in data._converted so # that it doesn't have to be re-converted converted = np.zeros(field.shape, dtype=bool) converted[:n] = inarr data._cache_field(name, converted) # TODO: Maybe this step isn't necessary at all if _scale_back # will handle it? inarr = np.where(inarr == np.False_, ord('F'), ord('T')) elif (columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints): # Temporary hack... bzero = column.bzero converted = np.zeros(field.shape, dtype=inarr.dtype) converted[:n] = inarr data._cache_field(name, converted) if n < nrows: # Pre-scale rows below the input data field[n:] = -bzero inarr = inarr - bzero elif isinstance(columns, _AsciiColDefs): # Regardless whether the format is character or numeric, if the # input array contains characters then it's already in the raw # format for ASCII tables if fitsformat._pseudo_logical: # Hack to support converting from 8-bit T/F characters # Normally the column array is a chararray of 1 character # strings, but we need to view it as a normal ndarray of # 8-bit ints to fill it with ASCII codes for 'T' and 'F' outarr = field.view(np.uint8, np.ndarray)[:n] elif arr.dtype.kind not in ('S', 'U'): # Set up views of numeric columns with the appropriate # numeric dtype # Fill with the appropriate blanks for the column format data._cache_field(name, np.zeros(nrows, dtype=arr.dtype)) outarr = data._converted[name][:n] outarr[:] = inarr continue if inarr.shape != outarr.shape: if (inarr.dtype.kind == outarr.dtype.kind and inarr.dtype.kind in ('U', 'S') and inarr.dtype != outarr.dtype): inarr_rowsize = inarr[0].size inarr = inarr.flatten().view(outarr.dtype) # This is a special case to handle input arrays with # non-trivial TDIMn. # By design each row of the outarray is 1-D, while each row of # the input array may be n-D if outarr.ndim > 1: # The normal case where the first dimension is the rows inarr_rowsize = inarr[0].size inarr = inarr.reshape(n, inarr_rowsize) outarr[:, :inarr_rowsize] = inarr else: # Special case for strings where the out array only has one # dimension (the second dimension is rolled up into the # strings outarr[:n] = inarr.ravel() else: outarr[:] = inarr # Now replace the original column array references with the new # fields # This is required to prevent the issue reported in # https://github.com/spacetelescope/PyFITS/issues/99 for idx in range(len(columns)): columns._arrays[idx] = data.field(idx) return data def __repr__(self): # Force use of the normal ndarray repr (rather than the new # one added for recarray in Numpy 1.10) for backwards compat return np.ndarray.__repr__(self) def __getattribute__(self, attr): # First, see if ndarray has this attr, and return it if so. Note that # this means a field with the same name as an ndarray attr cannot be # accessed by attribute, this is Numpy's default behavior. # We avoid using np.recarray.__getattribute__ here because after doing # this check it would access the columns without doing the conversions # that we need (with .field, see below). try: return object.__getattribute__(self, attr) except AttributeError: pass # attr might still be a fieldname. If we have column definitions, # we should access this via .field, as the data may have to be scaled. if self._coldefs is not None and attr in self.columns.names: return self.field(attr) # If not, just let the usual np.recarray override deal with it. 
return super().__getattribute__(attr) def __getitem__(self, key): if self._coldefs is None: return super().__getitem__(key) if isinstance(key, str): return self.field(key) # Have to view as a recarray then back as a FITS_rec, otherwise the # circular reference fix/hack in FITS_rec.field() won't preserve # the slice. out = self.view(np.recarray)[key] if type(out) is not np.recarray: # Oops, we got a single element rather than a view. In that case, # return a Record, which has no __getstate__ and is more efficient. return self._record_type(self, key) # We got a view; change it back to our class, and add stuff out = out.view(type(self)) out._uint = self._uint out._coldefs = ColDefs(self._coldefs) arrays = [] out._converted = {} for idx, name in enumerate(self._coldefs.names): # # Store the new arrays for the _coldefs object # arrays.append(self._coldefs._arrays[idx][key]) # Ensure that the sliced FITS_rec will view the same scaled # columns as the original; this is one of the few cases where # it is not necessary to use _cache_field() if name in self._converted: dummy = self._converted[name] field = np.ndarray.__getitem__(dummy, key) out._converted[name] = field out._coldefs._arrays = arrays return out def __setitem__(self, key, value): if self._coldefs is None: return super().__setitem__(key, value) if isinstance(key, str): self[key][:] = value return if isinstance(key, slice): end = min(len(self), key.stop or len(self)) end = max(0, end) start = max(0, key.start or 0) end = min(end, start + len(value)) for idx in range(start, end): self.__setitem__(idx, value[idx - start]) return if isinstance(value, FITS_record): for idx in range(self._nfields): self.field(self.names[idx])[key] = value.field(self.names[idx]) elif isinstance(value, (tuple, list, np.void)): if self._nfields == len(value): for idx in range(self._nfields): self.field(idx)[key] = value[idx] else: raise ValueError('Input tuple or list required to have {} ' 'elements.'.format(self._nfields)) else: raise TypeError('Assignment requires a FITS_record, tuple, or ' 'list as input.') def _ipython_key_completions_(self): return self.names def copy(self, order='C'): """ The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to `numpy.copy`. Differences include that it re-views the copied array as self's ndarray subclass, as though it were taking a slice; this means ``__array_finalize__`` is called and the copy shares all the array attributes (including ``._converted``!). So we need to make a deep copy of all those attributes so that the two arrays truly do not share any data. """ new = super().copy(order=order) new.__dict__ = copy.deepcopy(self.__dict__) return new @property def columns(self): """A user-visible accessor for the coldefs.""" return self._coldefs @property def _coldefs(self): # This used to be a normal internal attribute, but it was changed to a # property as a quick and transparent way to work around the reference # leak bug fixed in https://github.com/astropy/astropy/pull/4539 # # See the long comment in the Column.array property for more details # on this. But in short, FITS_rec now has a ._col_weakrefs attribute # which is a WeakSet of weakrefs to each Column in _coldefs. # # So whenever ._coldefs is set we also add each Column in the ColDefs # to the weakrefs set. This is an easy way to find out if a Column has # any references to it external to the FITS_rec (i.e. a user assigned a # column to a variable). 
If the column is still in _col_weakrefs then # there are other references to it external to this FITS_rec. We use # that information in __del__ to save off copies of the array data # for those columns to their Column.array property before our memory # is freed. return self.__dict__.get('_coldefs') @_coldefs.setter def _coldefs(self, cols): self.__dict__['_coldefs'] = cols if isinstance(cols, ColDefs): for col in cols.columns: self._col_weakrefs.add(col) @_coldefs.deleter def _coldefs(self): try: del self.__dict__['_coldefs'] except KeyError as exc: raise AttributeError(exc.args[0]) def __del__(self): try: del self._coldefs if self.dtype.fields is not None: for col in self._col_weakrefs: if col.array is not None: col.array = col.array.copy() # See issues #4690 and #4912 except (AttributeError, TypeError): # pragma: no cover pass @property def names(self): """List of column names.""" if self.dtype.fields: return list(self.dtype.names) elif getattr(self, '_coldefs', None) is not None: return self._coldefs.names else: return None @property def formats(self): """List of column FITS formats.""" if getattr(self, '_coldefs', None) is not None: return self._coldefs.formats return None @property def _raw_itemsize(self): """ Returns the size of row items that would be written to the raw FITS file, taking into account the possibility of unicode columns being compactified. Currently for internal use only. """ if _has_unicode_fields(self): total_itemsize = 0 for field in self.dtype.fields.values(): itemsize = field[0].itemsize if field[0].kind == 'U': itemsize = itemsize // 4 total_itemsize += itemsize return total_itemsize else: # Just return the normal itemsize return self.itemsize def field(self, key): """ A view of a `Column`'s data as an array. """ # NOTE: The *column* index may not be the same as the field index in # the recarray, if the column is a phantom column column = self.columns[key] name = column.name format = column.format if format.dtype.itemsize == 0: warnings.warn( 'Field {!r} has a repeat count of 0 in its format code, ' 'indicating an empty field.'.format(key)) return np.array([], dtype=format.dtype) # If field's base is a FITS_rec, we can run into trouble because it # contains a reference to the ._coldefs object of the original data; # this can lead to a circular reference; see ticket #49 base = self while (isinstance(base, FITS_rec) and isinstance(base.base, np.recarray)): base = base.base # base could still be a FITS_rec in some cases, so take care to # use rec.recarray.field to avoid a potential infinite # recursion field = _get_recarray_field(base, name) if name not in self._converted: recformat = format.recformat # TODO: If we're now passing the column to these subroutines, do we # really need to pass them the recformat? if isinstance(recformat, _FormatP): # for P format converted = self._convert_p(column, field, recformat) else: # Handle all other column data types which are fixed-width # fields converted = self._convert_other(column, field, recformat) # Note: Never assign values directly into the self._converted dict; # always go through self._cache_field; this way self._converted is # only used to store arrays that are not already direct views of # our own data. self._cache_field(name, converted) return converted return self._converted[name] def _cache_field(self, name, field): """ Do not store fields in _converted if one of its bases is self, or if it has a common base with self. 
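# Illustrative, stand-alone sketch (not part of this module) of the
# weak-reference bookkeeping described above: tracked objects live in a WeakSet,
# so membership only persists while some *other* strong reference keeps the
# object alive.  The class below is an invented stand-in, not the real Column.
import weakref

class _FakeColumn:
    pass

_refs = weakref.WeakSet()
_col = _FakeColumn()
_refs.add(_col)
assert _col in _refs      # an external strong reference exists
del _col                  # drop the only strong reference ...
assert len(_refs) == 0    # ... and the WeakSet forgets the object automatically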
This results in a reference cycle that cannot be broken since ndarrays do not participate in cyclic garbage collection. """ base = field while True: self_base = self while True: if self_base is base: return if getattr(self_base, 'base', None) is not None: self_base = self_base.base else: break if getattr(base, 'base', None) is not None: base = base.base else: break self._converted[name] = field def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value): """ Update how the data is formatted depending on changes to column attributes initiated by the user through the `Column` interface. Dispatches column attribute change notifications to individual methods for each attribute ``_update_column_<attr>`` """ method_name = f'_update_column_{attr}' if hasattr(self, method_name): # Right now this is so we can be lazy and not implement updaters # for every attribute yet--some we may not need at all, TBD getattr(self, method_name)(column, idx, old_value, new_value) def _update_column_name(self, column, idx, old_name, name): """Update the dtype field names when a column name is changed.""" dtype = self.dtype # Updating the names on the dtype should suffice dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:] def _convert_x(self, field, recformat): """Convert a raw table column to a bit array as specified by the FITS X format. """ dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_) _unwrapx(field, dummy, recformat.repeat) return dummy def _convert_p(self, column, field, recformat): """Convert a raw table column of FITS P or Q format descriptors to a VLA column with the array data returned from the heap. """ dummy = _VLF([None] * len(self), dtype=recformat.dtype) raw_data = self._get_raw_data() if raw_data is None: raise OSError( "Could not find heap data for the {!r} variable-length " "array column.".format(column.name)) for idx in range(len(self)): offset = field[idx, 1] + self._heapoffset count = field[idx, 0] if recformat.dtype == 'a': dt = np.dtype(recformat.dtype + str(1)) arr_len = count * dt.itemsize da = raw_data[offset:offset + arr_len].view(dt) da = np.char.array(da.view(dtype=dt), itemsize=count) dummy[idx] = decode_ascii(da) else: dt = np.dtype(recformat.dtype) arr_len = count * dt.itemsize dummy[idx] = raw_data[offset:offset + arr_len].view(dt) dummy[idx].dtype = dummy[idx].dtype.newbyteorder('>') # Each array in the field may now require additional # scaling depending on the other scaling parameters # TODO: The same scaling parameters apply to every # array in the column so this is currently very slow; we # really only need to check once whether any scaling will # be necessary and skip this step if not # TODO: Test that this works for X format; I don't think # that it does--the recformat variable only applies to the P # format not the X format dummy[idx] = self._convert_other(column, dummy[idx], recformat) return dummy def _convert_ascii(self, column, field): """ Special handling for ASCII table columns to convert columns containing numeric types to actual numeric arrays from the string representation. 
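# Illustrative sketch (not part of this module) of the base-walking test used by
# _cache_field above: two ndarrays are treated as related when some ancestor in
# one array's .base chain is an ancestor of the other's.  This is a simplified,
# symmetric version written only for demonstration.
import numpy as np

def _shares_a_base(a, b):
    """Return True if ``a`` and ``b`` have a common ancestor via ``.base``."""
    def _chain(arr):
        while arr is not None:
            yield arr
            arr = getattr(arr, 'base', None)
    b_ancestors = list(_chain(b))
    return any(x is y for x in _chain(a) for y in b_ancestors)

_buf = np.zeros(10)
assert _shares_a_base(_buf[2:], _buf[:5])        # views of the same buffer
assert not _shares_a_base(_buf, np.zeros(10))    # unrelated allocations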
""" format = column.format recformat = getattr(format, 'recformat', ASCII2NUMPY[format[0]]) # if the string = TNULL, return ASCIITNULL nullval = str(column.null).strip().encode('ascii') if len(nullval) > format.width: nullval = nullval[:format.width] # Before using .replace make sure that any trailing bytes in each # column are filled with spaces, and *not*, say, nulls; this causes # functions like replace to potentially leave gibberish bytes in the # array buffer. dummy = np.char.ljust(field, format.width) dummy = np.char.replace(dummy, encode_ascii('D'), encode_ascii('E')) null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width)) # Convert all fields equal to the TNULL value (nullval) to empty fields. # TODO: These fields really should be converted to NaN or something else undefined. # Currently they are converted to empty fields, which are then set to zero. dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy) # always replace empty fields, see https://github.com/astropy/astropy/pull/5394 if nullval != b'': dummy = np.where(np.char.strip(dummy) == b'', null_fill, dummy) try: dummy = np.array(dummy, dtype=recformat) except ValueError as exc: indx = self.names.index(column.name) raise ValueError( '{}; the header may be missing the necessary TNULL{} ' 'keyword or the table contains invalid data'.format( exc, indx + 1)) return dummy def _convert_other(self, column, field, recformat): """Perform conversions on any other fixed-width column data types. This may not perform any conversion at all if it's not necessary, in which case the original column array is returned. """ if isinstance(recformat, _FormatX): # special handling for the X format return self._convert_x(field, recformat) (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \ self._get_scale_factors(column) indx = self.names.index(column.name) # ASCII table, convert strings to numbers # TODO: # For now, check that these are ASCII columns by checking the coldefs # type; in the future all columns (for binary tables, ASCII tables, or # otherwise) should "know" what type they are already and how to handle # converting their data from FITS format to native format and vice # versa... if not _str and isinstance(self._coldefs, _AsciiColDefs): field = self._convert_ascii(column, field) # Test that the dimensions given in dim are sensible; otherwise # display a warning and ignore them if dim: # See if the dimensions already match, if not, make sure the # number items will fit in the specified dimensions if field.ndim > 1: actual_shape = field.shape[1:] if _str: actual_shape = actual_shape + (field.itemsize,) else: actual_shape = field.shape[0] if dim == actual_shape: # The array already has the correct dimensions, so we # ignore dim and don't convert dim = None else: nitems = reduce(operator.mul, dim) if _str: actual_nitems = field.itemsize elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1 actual_nitems = 1 else: actual_nitems = field.shape[1] if nitems > actual_nitems: warnings.warn( 'TDIM{} value {:d} does not fit with the size of ' 'the array items ({:d}). TDIM{:d} will be ignored.' 
.format(indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1)) dim = None # further conversion for both ASCII and binary tables # For now we've made columns responsible for *knowing* whether their # data has been scaled, but we make the FITS_rec class responsible for # actually doing the scaling # TODO: This also needs to be fixed in the effort to make Columns # responsible for scaling their arrays to/from FITS native values if not column.ascii and column.format.p_format: format_code = column.format.p_format else: # TODO: Rather than having this if/else it might be nice if the # ColumnFormat class had an attribute guaranteed to give the format # of actual values in a column regardless of whether the true # format is something like P or Q format_code = column.format.format if (_number and (_scale or _zero) and not column._physical_values): # This is to handle pseudo unsigned ints in table columns # TODO: For now this only really works correctly for binary tables # Should it work for ASCII tables as well? if self._uint: if bzero == 2**15 and format_code == 'I': field = np.array(field, dtype=np.uint16) elif bzero == 2**31 and format_code == 'J': field = np.array(field, dtype=np.uint32) elif bzero == 2**63 and format_code == 'K': field = np.array(field, dtype=np.uint64) bzero64 = np.uint64(2 ** 63) else: field = np.array(field, dtype=np.float64) else: field = np.array(field, dtype=np.float64) if _scale: np.multiply(field, bscale, field) if _zero: if self._uint and format_code == 'K': # There is a chance of overflow, so be careful test_overflow = field.copy() try: test_overflow += bzero64 except OverflowError: warnings.warn( "Overflow detected while applying TZERO{:d}. " "Returning unscaled data.".format(indx + 1)) else: field = test_overflow else: field += bzero # mark the column as scaled column._physical_values = True elif _bool and field.dtype != bool: field = np.equal(field, ord('T')) elif _str: if not self._character_as_bytes: with suppress(UnicodeDecodeError): field = decode_ascii(field) if dim: # Apply the new field item dimensions nitems = reduce(operator.mul, dim) if field.ndim > 1: field = field[:, :nitems] if _str: fmt = field.dtype.char dtype = (f'|{fmt}{dim[-1]}', dim[:-1]) field.dtype = dtype else: field.shape = (field.shape[0],) + dim return field def _get_heap_data(self): """ Returns a pointer into the table's raw data to its heap (if present). This is returned as a numpy byte array. """ if self._heapsize: raw_data = self._get_raw_data().view(np.ubyte) heap_end = self._heapoffset + self._heapsize return raw_data[self._heapoffset:heap_end] else: return np.array([], dtype=np.ubyte) def _get_raw_data(self): """ Returns the base array of self that "raw data array" that is the array in the format that it was first read from a file before it was sliced or viewed as a different type in any way. This is determined by walking through the bases until finding one that has at least the same number of bytes as self, plus the heapsize. This may be the immediate .base but is not always. This is used primarily for variable-length array support which needs to be able to find the heap (the raw data *may* be larger than nbytes + heapsize if it contains a gap or padding). May return ``None`` if no array resembling the "raw data" according to the stated criteria can be found. 
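# Illustrative sketch (not part of this module): the conversions above implement
# the standard FITS relation  physical = raw * BSCALE + BZERO,  and the special
# case BZERO = 2**15 on 16-bit integers is how unsigned 16-bit data is carried
# in a signed column.  All values here are invented.
import numpy as np

_raw = np.array([-32768, 0, 32767], dtype=np.int16)             # as stored on disk
_as_uint = (_raw.astype(np.int32) + 2 ** 15).astype(np.uint16)  # pseudo-unsigned
assert list(_as_uint) == [0, 32768, 65535]

_bscale, _bzero = 0.5, 10.0                          # a generic scaled column
_physical = _raw.astype(np.float64) * _bscale + _bzero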
""" raw_data_bytes = self.nbytes + self._heapsize base = self while hasattr(base, 'base') and base.base is not None: base = base.base if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes: return base def _get_scale_factors(self, column): """Get all the scaling flags and factors for one column.""" # TODO: Maybe this should be a method/property on Column? Or maybe # it's not really needed at all... _str = column.format.format == 'A' _bool = column.format.format == 'L' _number = not (_bool or _str) bscale = column.bscale bzero = column.bzero _scale = bscale not in ('', None, 1) _zero = bzero not in ('', None, 0) # ensure bscale/bzero are numbers if not _scale: bscale = 1 if not _zero: bzero = 0 # column._dims gives a tuple, rather than column.dim which returns the # original string format code from the FITS header... dim = column._dims return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) def _scale_back(self, update_heap_pointers=True): """ Update the parent array, using the (latest) scaled array. If ``update_heap_pointers`` is `False`, this will leave all the heap pointers in P/Q columns as they are verbatim--it only makes sense to do this if there is already data on the heap and it can be guaranteed that that data has not been modified, and there is not new data to add to the heap. Currently this is only used as an optimization for CompImageHDU that does its own handling of the heap. """ # Running total for the new heap size heapsize = 0 for indx, name in enumerate(self.dtype.names): column = self._coldefs[indx] recformat = column.format.recformat raw_field = _get_recarray_field(self, indx) # add the location offset of the heap area for each # variable length column if isinstance(recformat, _FormatP): # Irritatingly, this can return a different dtype than just # doing np.dtype(recformat.dtype); but this returns the results # that we want. For example if recformat.dtype is 'a' we want # an array of characters. dtype = np.array([], dtype=recformat.dtype).dtype if update_heap_pointers and name in self._converted: # The VLA has potentially been updated, so we need to # update the array descriptors raw_field[:] = 0 # reset npts = [len(arr) for arr in self._converted[name]] raw_field[:len(npts), 0] = npts raw_field[1:, 1] = (np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize) raw_field[:, 1][:] += heapsize heapsize += raw_field[:, 0].sum() * dtype.itemsize # Even if this VLA has not been read or updated, we need to # include the size of its constituent arrays in the heap size # total if heapsize >= 2**31: raise ValueError("The heapsize limit for 'P' format " "has been reached. 
" "Please consider using the 'Q' format " "for your file.") if isinstance(recformat, _FormatX) and name in self._converted: _wrapx(self._converted[name], raw_field, recformat.repeat) continue _str, _bool, _number, _scale, _zero, bscale, bzero, _ = \ self._get_scale_factors(column) field = self._converted.get(name, raw_field) # conversion for both ASCII and binary tables if _number or _str: if _number and (_scale or _zero) and column._physical_values: dummy = field.copy() if _zero: dummy -= bzero if _scale: dummy /= bscale # This will set the raw values in the recarray back to # their non-physical storage values, so the column should # be mark is not scaled column._physical_values = False elif _str or isinstance(self._coldefs, _AsciiColDefs): dummy = field else: continue # ASCII table, convert numbers to strings if isinstance(self._coldefs, _AsciiColDefs): self._scale_back_ascii(indx, dummy, raw_field) # binary table string column elif isinstance(raw_field, chararray.chararray): self._scale_back_strings(indx, dummy, raw_field) # all other binary table columns else: if len(raw_field) and isinstance(raw_field[0], np.integer): dummy = np.around(dummy) if raw_field.shape == dummy.shape: raw_field[:] = dummy else: # Reshaping the data is necessary in cases where the # TDIMn keyword was used to shape a column's entries # into arrays raw_field[:] = dummy.ravel().view(raw_field.dtype) del dummy # ASCII table does not have Boolean type elif _bool and name in self._converted: choices = (np.array([ord('F')], dtype=np.int8)[0], np.array([ord('T')], dtype=np.int8)[0]) raw_field[:] = np.choose(field, choices) # Store the updated heapsize self._heapsize = heapsize def _scale_back_strings(self, col_idx, input_field, output_field): # There are a few possibilities this has to be able to handle properly # The input_field, which comes from the _converted column is of dtype # 'Un' so that elements read out of the array are normal str # objects (i.e. unicode strings) # # At the other end the *output_field* may also be of type 'S' or of # type 'U'. It will *usually* be of type 'S' because when reading # an existing FITS table the raw data is just ASCII strings, and # represented in Numpy as an S array. However, when a user creates # a new table from scratch, they *might* pass in a column containing # unicode strings (dtype 'U'). Therefore the output_field of the # raw array is actually a unicode array. But we still want to make # sure the data is encodable as ASCII. Later when we write out the # array we use, in the dtype 'U' case, a different write routine # that writes row by row and encodes any 'U' columns to ASCII. 
# If the output_field is non-ASCII we will worry about ASCII encoding # later when writing; otherwise we can do it right here if input_field.dtype.kind == 'U' and output_field.dtype.kind == 'S': try: _ascii_encode(input_field, out=output_field) except _UnicodeArrayEncodeError as exc: raise ValueError( "Could not save column '{}': Contains characters that " "cannot be encoded as ASCII as required by FITS, starting " "at the index {!r} of the column, and the index {} of " "the string at that location.".format( self._coldefs[col_idx].name, exc.index[0] if len(exc.index) == 1 else exc.index, exc.start)) else: # Otherwise go ahead and do a direct copy into--if both are type # 'U' we'll handle encoding later input_field = input_field.flatten().view(output_field.dtype) output_field.flat[:] = input_field # Ensure that blanks at the end of each string are # converted to nulls instead of spaces, see Trac #15 # and #111 _rstrip_inplace(output_field) def _scale_back_ascii(self, col_idx, input_field, output_field): """ Convert internal array values back to ASCII table representation. The ``input_field`` is the internal representation of the values, and the ``output_field`` is the character array representing the ASCII output that will be written. """ starts = self._coldefs.starts[:] spans = self._coldefs.spans format = self._coldefs[col_idx].format # The the index of the "end" column of the record, beyond # which we can't write end = super().field(-1).itemsize starts.append(end + starts[-1]) if col_idx > 0: lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1] else: lead = 0 if lead < 0: warnings.warn('Column {!r} starting point overlaps the previous ' 'column.'.format(col_idx + 1)) trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx] if trail < 0: warnings.warn('Column {!r} ending point overlaps the next ' 'column.'.format(col_idx + 1)) # TODO: It would be nice if these string column formatting # details were left to a specialized class, as is the case # with FormatX and FormatP if 'A' in format: _pc = '{:' else: _pc = '{:>' fmt = ''.join([_pc, format[1:], ASCII2STR[format[0]], '}', (' ' * trail)]) # Even if the format precision is 0, we should output a decimal point # as long as there is space to do so--not including a decimal point in # a float value is discouraged by the FITS Standard trailing_decimal = (format.precision == 0 and format.format in ('F', 'E', 'D')) # not using numarray.strings's num2char because the # result is not allowed to expand (as C/Python does). for jdx, value in enumerate(input_field): value = fmt.format(value) if len(value) > starts[col_idx + 1] - starts[col_idx]: raise ValueError( "Value {!r} does not fit into the output's itemsize of " "{}.".format(value, spans[col_idx])) if trailing_decimal and value[0] == ' ': # We have some extra space in the field for the trailing # decimal point value = value[1:] + '.' output_field[jdx] = value # Replace exponent separator in floating point numbers if 'D' in format: output_field[:] = output_field.replace(b'E', b'D') def tolist(self): # Override .tolist to take care of special case of VLF column_lists = [self[name].tolist() for name in self.columns.names] return [list(row) for row in zip(*column_lists)] def _get_recarray_field(array, key): """ Compatibility function for using the recarray base class's field method. This incorporates the legacy functionality of returning string arrays as Numeric-style chararray objects. 
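# Illustrative sketch (not part of this module) of the per-value formatting done
# in _scale_back_ascii above: right-justify the number into its fixed-width span,
# then swap the exponent letter for ASCII 'D' columns.  Span width and format are
# invented.
_span, _value = 10, 150.0
_text = '{:>10.3E}'.format(_value)       # -> ' 1.500E+02', right-justified to 10
assert len(_text) == _span
_d_column = _text.replace('E', 'D')      # -> ' 1.500D+02' when the format is 'D'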
""" # Numpy >= 1.10.dev recarray no longer returns chararrays for strings # This is currently needed for backwards-compatibility and for # automatic truncation of trailing whitespace field = np.recarray.field(array, key) if (field.dtype.char in ('S', 'U') and not isinstance(field, chararray.chararray)): field = field.view(chararray.chararray) return field class _UnicodeArrayEncodeError(UnicodeEncodeError): def __init__(self, encoding, object_, start, end, reason, index): super().__init__(encoding, object_, start, end, reason) self.index = index def _ascii_encode(inarray, out=None): """ Takes a unicode array and fills the output string array with the ASCII encodings (if possible) of the elements of the input array. The two arrays must be the same size (though not necessarily the same shape). This is like an inplace version of `np.char.encode` though simpler since it's only limited to ASCII, and hence the size of each character is guaranteed to be 1 byte. If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this is just a `UnicodeEncodeError` with an additional attribute for the index of the item that couldn't be encoded. """ out_dtype = np.dtype((f'S{inarray.dtype.itemsize // 4}', inarray.dtype.shape)) if out is not None: out = out.view(out_dtype) op_dtypes = [inarray.dtype, out_dtype] op_flags = [['readonly'], ['writeonly', 'allocate']] it = np.nditer([inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=['zerosize_ok']) try: for initem, outitem in it: outitem[...] = initem.item().encode('ascii') except UnicodeEncodeError as exc: index = np.unravel_index(it.iterindex, inarray.shape) raise _UnicodeArrayEncodeError(*(exc.args + (index,))) return it.operands[1] def _has_unicode_fields(array): """ Returns True if any fields in a structured array have Unicode dtype. """ dtypes = (d[0] for d in array.dtype.fields.values()) return any(d.kind == 'U' for d in dtypes)
9529cb91c6421b8bd015321aa469c3f986a6d9eeb5565d761641e9774252b90e
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import itertools import os import re import shutil import sys import warnings import numpy as np from . import compressed from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU from .groups import GroupsHDU from .image import PrimaryHDU, ImageHDU from astropy.io.fits.file import _File, FILE_MODES from astropy.io.fits.header import _pad_length from astropy.io.fits.util import (_free_space_check, _get_array_mmap, _is_int, _tmp_name, fileobj_closed, fileobj_mode, ignore_sigint, isfile) from astropy.io.fits.verify import _Verify, _ErrList, VerifyError, VerifyWarning from astropy.utils import indent from astropy.utils.exceptions import AstropyUserWarning # NOTE: Python can be built without bz2. from astropy.utils.compat.optional_deps import HAS_BZ2 if HAS_BZ2: import bz2 __all__ = ["HDUList", "fitsopen"] # FITS file signature as per RFC 4047 FITS_SIGNATURE = b'SIMPLE = T' def fitsopen(name, mode='readonly', memmap=None, save_backup=False, cache=True, lazy_load_hdus=None, ignore_missing_simple=False, **kwargs): """Factory function to open a FITS file and return an `HDUList` object. Parameters ---------- name : str, file-like or `pathlib.Path` File to be opened. mode : str, optional Open mode, 'readonly', 'update', 'append', 'denywrite', or 'ostream'. Default is 'readonly'. If ``name`` is a file object that is already opened, ``mode`` must match the mode the file was opened with, readonly (rb), update (rb+), append (ab+), ostream (w), denywrite (rb)). memmap : bool, optional Is memory mapping to be used? This value is obtained from the configuration item ``astropy.io.fits.Conf.use_memmap``. Default is `True`. save_backup : bool, optional If the file was opened in update or append mode, this ensures that a backup of the original file is saved before any changes are flushed. The backup has the same name as the original file with ".bak" appended. If "file.bak" already exists then "file.bak.1" is used, and so on. Default is `False`. cache : bool, optional If the file name is a URL, `~astropy.utils.data.download_file` is used to open the file. This specifies whether or not to save the file locally in Astropy's download cache. Default is `True`. lazy_load_hdus : bool, optional To avoid reading all the HDUs and headers in a FITS file immediately upon opening. This is an optimization especially useful for large files, as FITS has no way of determining the number and offsets of all the HDUs in a file without scanning through the file and reading all the headers. Default is `True`. To disable lazy loading and read all HDUs immediately (the old behavior) use ``lazy_load_hdus=False``. This can lead to fewer surprises--for example with lazy loading enabled, ``len(hdul)`` can be slow, as it means the entire FITS file needs to be read in order to determine the number of HDUs. ``lazy_load_hdus=False`` ensures that all HDUs have already been loaded after the file has been opened. .. versionadded:: 1.3 uint : bool, optional Interpret signed integer data where ``BZERO`` is the central value and ``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as ``uint16`` data. Default is `True` so that the pseudo-unsigned integer convention is assumed. ignore_missing_end : bool, optional Do not raise an exception when opening a file that is missing an ``END`` card in the last header. Default is `False`. 
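# Illustrative sketch (not part of this module): a typical fits.open() call
# combining several of the keyword arguments documented above.  The file written
# here is a small throwaway created only so the example is self-contained.
import numpy as np
from astropy.io import fits

fits.HDUList([fits.PrimaryHDU(np.zeros((2, 2)))]).writeto('demo_open.fits',
                                                          overwrite=True)
with fits.open('demo_open.fits', mode='readonly', memmap=True,
               lazy_load_hdus=True, uint=True) as _hdul:
    _hdul.info()       # remaining HDUs/headers are read lazily, only as needed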
ignore_missing_simple : bool, optional Do not raise an exception when the SIMPLE keyword is missing. Note that io.fits will raise a warning if a SIMPLE card is present but written in a way that does not follow the FITS Standard. Default is `False`. .. versionadded:: 4.2 checksum : bool, str, optional If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. Updates to a file that already has a checksum will preserve and update the existing checksums unless this argument is given a value of 'remove', in which case the CHECKSUM and DATASUM values are not checked, and are removed when saving changes to the file. Default is `False`. disable_image_compression : bool, optional If `True`, treats compressed image HDU's like normal binary table HDU's. Default is `False`. do_not_scale_image_data : bool, optional If `True`, image data is not scaled using BSCALE/BZERO values when read. Default is `False`. character_as_bytes : bool, optional Whether to return bytes for string columns, otherwise unicode strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. Default is `False`. ignore_blank : bool, optional If `True`, the BLANK keyword is ignored if present. Default is `False`. scale_back : bool, optional If `True`, when saving changes to a file that contained scaled image data, restore the data to the original type and reapply the original BSCALE/BZERO values. This could lead to loss of accuracy if scaling back to integer values after performing floating point operations on the data. Default is `False`. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. Returns ------- hdulist : `HDUList` `HDUList` containing all of the header data units in the file. """ from astropy.io.fits import conf if memmap is None: # distinguish between True (kwarg explicitly set) # and None (preference for memmap in config, might be ignored) memmap = None if conf.use_memmap else False else: memmap = bool(memmap) if lazy_load_hdus is None: lazy_load_hdus = conf.lazy_load_hdus else: lazy_load_hdus = bool(lazy_load_hdus) if 'uint' not in kwargs: kwargs['uint'] = conf.enable_uint if not name: raise ValueError(f'Empty filename: {name!r}') return HDUList.fromfile(name, mode, memmap, save_backup, cache, lazy_load_hdus, ignore_missing_simple, **kwargs) class HDUList(list, _Verify): """ HDU list class. This is the top-level FITS object. When a FITS file is opened, a `HDUList` object is returned. """ def __init__(self, hdus=[], file=None): """ Construct a `HDUList` object. Parameters ---------- hdus : BaseHDU or sequence thereof, optional The HDU object(s) to comprise the `HDUList`. Should be instances of HDU classes like `ImageHDU` or `BinTableHDU`. file : file-like, bytes, optional The opened physical file associated with the `HDUList` or a bytes object containing the contents of the FITS file. 
""" if isinstance(file, bytes): self._data = file self._file = None else: self._file = file self._data = None # For internal use only--the keyword args passed to fitsopen / # HDUList.fromfile/string when opening the file self._open_kwargs = {} self._in_read_next_hdu = False # If we have read all the HDUs from the file or not # The assumes that all HDUs have been written when we first opened the # file; we do not currently support loading additional HDUs from a file # while it is being streamed to. In the future that might be supported # but for now this is only used for the purpose of lazy-loading of # existing HDUs. if file is None: self._read_all = True elif self._file is not None: # Should never attempt to read HDUs in ostream mode self._read_all = self._file.mode == 'ostream' else: self._read_all = False if hdus is None: hdus = [] # can take one HDU, as well as a list of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise TypeError("Invalid input for HDUList.") for idx, hdu in enumerate(hdus): if not isinstance(hdu, _BaseHDU): raise TypeError(f"Element {idx} in the HDUList input is not an HDU.") super().__init__(hdus) if file is None: # Only do this when initializing from an existing list of HDUs # When initializing from a file, this will be handled by the # append method after the first HDU is read self.update_extend() def __len__(self): if not self._in_read_next_hdu: self.readall() return super().__len__() def __repr__(self): # In order to correctly repr an HDUList we need to load all the # HDUs as well self.readall() return super().__repr__() def __iter__(self): # While effectively this does the same as: # for idx in range(len(self)): # yield self[idx] # the more complicated structure is here to prevent the use of len(), # which would break the lazy loading for idx in itertools.count(): try: yield self[idx] except IndexError: break def __getitem__(self, key): """ Get an HDU from the `HDUList`, indexed by number or name. """ # If the key is a slice we need to make sure the necessary HDUs # have been loaded before passing the slice on to super. if isinstance(key, slice): max_idx = key.stop # Check for and handle the case when no maximum was # specified (e.g. [1:]). if max_idx is None: # We need all of the HDUs, so load them # and reset the maximum to the actual length. max_idx = len(self) # Just in case the max_idx is negative... max_idx = self._positive_index_of(max_idx) number_loaded = super().__len__() if max_idx >= number_loaded: # We need more than we have, try loading up to and including # max_idx. Note we do not try to be clever about skipping HDUs # even though key.step might conceivably allow it. for i in range(number_loaded, max_idx): # Read until max_idx or to the end of the file, whichever # comes first. if not self._read_next_hdu(): break try: hdus = super().__getitem__(key) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') else: return HDUList(hdus) # Originally this used recursion, but hypothetically an HDU with # a very large number of HDUs could blow the stack, so use a loop # instead try: return self._try_while_unread_hdus(super().__getitem__, self._positive_index_of(key)) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. 
if self._read_all: raise e else: raise IndexError('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') def __contains__(self, item): """ Returns `True` if ``item`` is an ``HDU`` _in_ ``self`` or a valid extension specification (e.g., integer extension number, extension name, or a tuple of extension name and an extension version) of a ``HDU`` in ``self``. """ try: self._try_while_unread_hdus(self.index_of, item) except (KeyError, ValueError): return False return True def __setitem__(self, key, hdu): """ Set an HDU to the `HDUList`, indexed by number or name. """ _key = self._positive_index_of(key) if isinstance(hdu, (slice, list)): if _is_int(_key): raise ValueError('An element in the HDUList must be an HDU.') for item in hdu: if not isinstance(item, _BaseHDU): raise ValueError(f'{item} is not an HDU.') else: if not isinstance(hdu, _BaseHDU): raise ValueError(f'{hdu} is not an HDU.') try: self._try_while_unread_hdus(super().__setitem__, _key, hdu) except IndexError: raise IndexError(f'Extension {key} is out of bound or not found.') self._resize = True self._truncate = False def __delitem__(self, key): """ Delete an HDU from the `HDUList`, indexed by number or name. """ if isinstance(key, slice): end_index = len(self) else: key = self._positive_index_of(key) end_index = len(self) - 1 self._try_while_unread_hdus(super().__delitem__, key) if (key == end_index or key == -1 and not self._resize): self._truncate = True else: self._truncate = False self._resize = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): output_verify = self._open_kwargs.get('output_verify', 'exception') self.close(output_verify=output_verify) @classmethod def fromfile(cls, fileobj, mode=None, memmap=None, save_backup=False, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, **kwargs): """ Creates an `HDUList` instance from a file-like object. The actual implementation of ``fitsopen()``, and generally shouldn't be used directly. Use :func:`open` instead (and see its documentation for details of the parameters accepted by this method). """ return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap, save_backup=save_backup, cache=cache, ignore_missing_simple=ignore_missing_simple, lazy_load_hdus=lazy_load_hdus, **kwargs) @classmethod def fromstring(cls, data, **kwargs): """ Creates an `HDUList` instance from a string or other in-memory data buffer containing an entire FITS file. Similar to :meth:`HDUList.fromfile`, but does not accept the mode or memmap arguments, as they are only relevant to reading from a file on disk. This is useful for interfacing with other libraries such as CFITSIO, and may also be useful for streaming applications. Parameters ---------- data : str, buffer-like, etc. A string or other memory buffer containing an entire FITS file. Buffer-like objects include :class:`~bytes`, :class:`~bytearray`, :class:`~memoryview`, and :class:`~numpy.ndarray`. It should be noted that if that memory is read-only (such as a Python string) the returned :class:`HDUList`'s data portions will also be read-only. **kwargs : dict Optional keyword arguments. See :func:`astropy.io.fits.open` for details. Returns ------- hdul : HDUList An :class:`HDUList` object representing the in-memory FITS file. 
""" try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype='ubyte', buffer=data) except TypeError: raise TypeError( 'The provided object {} does not contain an underlying ' 'memory buffer. fromstring() requires an object that ' 'supports the buffer interface such as bytes, buffer, ' 'memoryview, ndarray, etc. This restriction is to ensure ' 'that efficient access to the array/table data is possible.' ''.format(data)) return cls._readfrom(data=data, **kwargs) def fileinfo(self, index): """ Returns a dictionary detailing information about the locations of the indexed HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Parameters ---------- index : int Index of HDU for which info is to be returned. Returns ------- fileinfo : dict or None The dictionary details information about the locations of the indexed HDU within an associated file. Returns `None` when the HDU is not associated with a file. Dictionary contents: ========== ======================================================== Key Value ========== ======================================================== file File object associated with the HDU filename Name of associated file object filemode Mode in which the file was opened (readonly, update, append, denywrite, ostream) resized Flag that when `True` indicates that the data has been resized since the last read/write so the returned values may not be valid. hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ======================================================== """ if self._file is not None: output = self[index].fileinfo() if not output: # OK, the HDU associated with this index is not yet # tied to the file associated with the HDUList. The only way # to get the file object is to check each of the HDU's in the # list until we find the one associated with the file. f = None for hdu in self: info = hdu.fileinfo() if info: f = info['file'] fm = info['filemode'] break output = {'file': f, 'filemode': fm, 'hdrLoc': None, 'datLoc': None, 'datSpan': None} output['filename'] = self._file.name output['resized'] = self._wasresized() else: output = None return output def __copy__(self): """ Return a shallow copy of an HDUList. Returns ------- copy : `HDUList` A shallow copy of this `HDUList` object. """ return self[:] # Syntactic sugar for `__copy__()` magic method copy = __copy__ def __deepcopy__(self, memo=None): return HDUList([hdu.copy() for hdu in self]) def pop(self, index=-1): """ Remove an item from the list and return it. Parameters ---------- index : int, str, tuple of (string, int), optional An integer value of ``index`` indicates the position from which ``pop()`` removes and returns an HDU. A string value or a tuple of ``(string, int)`` functions as a key for identifying the HDU to be removed and returned. If ``key`` is a tuple, it is of the form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous the numeric index must be used to index the duplicate HDU. Returns ------- hdu : BaseHDU The HDU object at position indicated by ``index`` or having name and version specified by ``index``. 
""" # Make sure that HDUs are loaded before attempting to pop self.readall() list_index = self.index_of(index) return super().pop(list_index) def insert(self, index, hdu): """ Insert an HDU into the `HDUList` at the given ``index``. Parameters ---------- index : int Index before which to insert the new HDU. hdu : BaseHDU The HDU object to insert """ if not isinstance(hdu, _BaseHDU): raise ValueError(f'{hdu} is not an HDU.') num_hdus = len(self) if index == 0 or num_hdus == 0: if num_hdus != 0: # We are inserting a new Primary HDU so we need to # make the current Primary HDU into an extension HDU. if isinstance(self[0], GroupsHDU): raise ValueError( "The current Primary HDU is a GroupsHDU. " "It can't be made into an extension HDU, " "so another HDU cannot be inserted before it.") hdu1 = ImageHDU(self[0].data, self[0].header) # Insert it into position 1, then delete HDU at position 0. super().insert(1, hdu1) super().__delitem__(0) if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().insert(0, phdu) index = 1 else: if isinstance(hdu, GroupsHDU): raise ValueError('A GroupsHDU must be inserted as a ' 'Primary HDU.') if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. hdu = ImageHDU(hdu.data, hdu.header) super().insert(index, hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def append(self, hdu): """ Append a new HDU to the `HDUList`. Parameters ---------- hdu : BaseHDU HDU to add to the `HDUList`. """ if not isinstance(hdu, _BaseHDU): raise ValueError('HDUList can only append an HDU.') if len(self) > 0: if isinstance(hdu, GroupsHDU): raise ValueError( "Can't append a GroupsHDU to a non-empty HDUList") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. # TODO: This isn't necessarily sufficient to copy the HDU; # _header_offset and friends need to be copied too. hdu = ImageHDU(hdu.data, hdu.header) else: if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary # HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().append(phdu) super().append(hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def index_of(self, key): """ Get the index of an HDU from the `HDUList`. Parameters ---------- key : int, str, tuple of (string, int) or BaseHDU The key identifying the HDU. If ``key`` is a tuple, it is of the form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. 
For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous (it shouldn't be but it's not impossible) the numeric index must be used to index the duplicate HDU. When ``key`` is an HDU object, this function returns the index of that HDU object in the ``HDUList``. Returns ------- index : int The index of the HDU in the `HDUList`. Raises ------ ValueError If ``key`` is an HDU object and it is not found in the ``HDUList``. KeyError If an HDU specified by the ``key`` that is an extension number, extension name, or a tuple of extension name and version is not found in the ``HDUList``. """ if _is_int(key): return key elif isinstance(key, tuple): _key, _ver = key elif isinstance(key, _BaseHDU): return self.index(key) else: _key = key _ver = None if not isinstance(_key, str): raise KeyError( '{} indices must be integers, extension names as strings, ' 'or (extname, version) tuples; got {}' ''.format(self.__class__.__name__, _key)) _key = (_key.strip()).upper() found = None for idx, hdu in enumerate(self): name = hdu.name if isinstance(name, str): name = name.strip().upper() # 'PRIMARY' should always work as a reference to the first HDU if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and (_ver is None or _ver == hdu.ver)): found = idx break if (found is None): raise KeyError(f'Extension {key!r} not found.') else: return found def _positive_index_of(self, key): """ Same as index_of, but ensures always returning a positive index or zero. (Really this should be called non_negative_index_of but it felt too long.) This means that if the key is a negative integer, we have to convert it to the corresponding positive index. This means knowing the length of the HDUList, which in turn means loading all HDUs. Therefore using negative indices on HDULists is inherently inefficient. """ index = self.index_of(key) if index >= 0: return index if abs(index) > len(self): raise IndexError( f'Extension {index} is out of bound or not found.') return len(self) + index def readall(self): """ Read data of all HDUs into memory. """ while self._read_next_hdu(): pass @ignore_sigint def flush(self, output_verify='fix', verbose=False): """ Force a write of the `HDUList` back to the file (for append and update modes only). Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print verbose messages """ if self._file.mode not in ('append', 'update', 'ostream'): warnings.warn("Flush for '{}' mode is not supported." .format(self._file.mode), AstropyUserWarning) return save_backup = self._open_kwargs.get('save_backup', False) if save_backup and self._file.mode in ('append', 'update'): filename = self._file.name if os.path.exists(filename): # The the file doesn't actually exist anymore for some reason # then there's no point in trying to make a backup backup = filename + '.bak' idx = 1 while os.path.exists(backup): backup = filename + '.bak.' 
+ str(idx) idx += 1 warnings.warn('Saving a backup of {} to {}.'.format( filename, backup), AstropyUserWarning) try: shutil.copy(filename, backup) except OSError as exc: raise OSError('Failed to save backup to destination {}: ' '{}'.format(filename, exc)) self.verify(option=output_verify) if self._file.mode in ('append', 'ostream'): for hdu in self: if verbose: try: extver = str(hdu._header['extver']) except KeyError: extver = '' # only append HDU's which are "new" if hdu._new: hdu._prewriteto(checksum=hdu._output_checksum) with _free_space_check(self): hdu._writeto(self._file) if verbose: print('append HDU', hdu.name, extver) hdu._new = False hdu._postwriteto() elif self._file.mode == 'update': self._flush_update() def update_extend(self): """ Make sure that if the primary header needs the keyword ``EXTEND`` that it has it and it is correct. """ if not len(self): return if not isinstance(self[0], PrimaryHDU): # A PrimaryHDU will be automatically inserted at some point, but it # might not have been added yet return hdr = self[0].header def get_first_ext(): try: return self[1] except IndexError: return None if 'EXTEND' in hdr: if not hdr['EXTEND'] and get_first_ext() is not None: hdr['EXTEND'] = True elif get_first_ext() is not None: if hdr['NAXIS'] == 0: hdr.set('EXTEND', True, after='NAXIS') else: n = hdr['NAXIS'] hdr.set('EXTEND', True, after='NAXIS' + str(n)) def writeto(self, fileobj, output_verify='exception', overwrite=False, checksum=False): """ Write the `HDUList` to a new file. Parameters ---------- fileobj : str, file-like or `pathlib.Path` File to write to. If a file object, must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the headers of all HDU's written to the file. """ if (len(self) == 0): warnings.warn("There is nothing to write.", AstropyUserWarning) return self.verify(option=output_verify) # make sure the EXTEND keyword is there if there is extension self.update_extend() # make note of whether the input file object is already open, in which # case we should not close it after writing (that should be the job # of the caller) closed = isinstance(fileobj, str) or fileobj_closed(fileobj) mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else 'ostream' # This can accept an open file object that's open to write only, or in # append/update modes but only if the file doesn't exist. fileobj = _File(fileobj, mode=mode, overwrite=overwrite) hdulist = self.fromfile(fileobj) try: dirname = os.path.dirname(hdulist._file.name) except (AttributeError, TypeError): dirname = None try: with _free_space_check(self, dirname=dirname): for hdu in self: hdu._prewriteto(checksum=checksum) hdu._writeto(hdulist._file) hdu._postwriteto() finally: hdulist.close(output_verify=output_verify, closed=closed) def close(self, output_verify='exception', verbose=False, closed=True): """ Close the associated FITS file and memmap object, if any. Parameters ---------- output_verify : str Output verification option. 
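# Illustrative sketch (not part of this module): writing a new file with
# checksums, then editing it in 'update' mode -- leaving the ``with`` block
# closes the file, which triggers the flush() path shown above.  The filename is
# a throwaway created for the demonstration.
import numpy as np
from astropy.io import fits

fits.HDUList([fits.PrimaryHDU(np.zeros((2, 2)))]).writeto('demo_flush.fits',
                                                          overwrite=True,
                                                          checksum=True)
with fits.open('demo_flush.fits', mode='update') as _upd:
    _upd[0].header['OBSERVER'] = 'example'     # change is flushed on close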
Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print out verbose messages. closed : bool When `True`, close the underlying file object. """ try: if (self._file and self._file.mode in ('append', 'update') and not self._file.closed): self.flush(output_verify=output_verify, verbose=verbose) finally: if self._file and closed and hasattr(self._file, 'close'): self._file.close() # Give individual HDUs an opportunity to do on-close cleanup for hdu in self: hdu._close(closed=closed) def info(self, output=None): """ Summarize the info of the HDUs in this `HDUList`. Note that this function prints its results to the console---it does not return a value. Parameters ---------- output : file-like or bool, optional A file-like object to write the output to. If `False`, does not output to a file and instead returns a list of tuples representing the HDU info. Writes to ``sys.stdout`` by default. """ if output is None: output = sys.stdout if self._file is None: name = '(No file associated with this HDUList)' else: name = self._file.name results = [f'Filename: {name}', 'No. Name Ver Type Cards Dimensions Format'] format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}' default = ('', '', '', 0, (), '', '') for idx, hdu in enumerate(self): summary = hdu._summary() if len(summary) < len(default): summary += default[len(summary):] summary = (idx,) + summary if output: results.append(format.format(*summary)) else: results.append(summary) if output: output.write('\n'.join(results)) output.write('\n') output.flush() else: return results[2:] def filename(self): """ Return the file name associated with the HDUList object if one exists. Otherwise returns None. Returns ------- filename : str A string containing the file name associated with the HDUList object if an association exists. Otherwise returns None. """ if self._file is not None: if hasattr(self._file, 'name'): return self._file.name return None @classmethod def _readfrom(cls, fileobj=None, data=None, mode=None, memmap=None, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, **kwargs): """ Provides the implementations from HDUList.fromfile and HDUList.fromstring, both of which wrap this method, as their implementations are largely the same. """ if fileobj is not None: if not isinstance(fileobj, _File): # instantiate a FITS file object (ffo) fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache) # The Astropy mode is determined by the _File initializer if the # supplied mode was None mode = fileobj.mode hdulist = cls(file=fileobj) else: if mode is None: # The default mode mode = 'readonly' hdulist = cls(file=data) # This method is currently only called from HDUList.fromstring and # HDUList.fromfile. If fileobj is None then this must be the # fromstring case; the data type of ``data`` will be checked in the # _BaseHDU.fromstring call. if (not ignore_missing_simple and hdulist._file and hdulist._file.mode != 'ostream' and hdulist._file.size > 0): pos = hdulist._file.tell() # FITS signature is supposed to be in the first 30 bytes, but to # allow reading various invalid files we will check in the first # card (80 bytes). 
simple = hdulist._file.read(80) match_sig = (simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in (b'T', b'F')) if not match_sig: # Check the SIMPLE card is there but not written correctly match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple) if match_sig_relaxed: warnings.warn("Found a SIMPLE card but its format doesn't" " respect the FITS Standard", VerifyWarning) else: if hdulist._file.close_on_error: hdulist._file.close() raise OSError( 'No SIMPLE card found, this file does not appear to ' 'be a valid FITS file. If this is really a FITS file, ' 'try with ignore_missing_simple=True') hdulist._file.seek(pos) # Store additional keyword args that were passed to fits.open hdulist._open_kwargs = kwargs if fileobj is not None and fileobj.writeonly: # Output stream--not interested in reading/parsing # the HDUs--just writing to the output file return hdulist # Make sure at least the PRIMARY HDU can be read read_one = hdulist._read_next_hdu() # If we're trying to read only and no header units were found, # raise an exception if not read_one and mode in ('readonly', 'denywrite'): # Close the file if necessary (issue #6168) if hdulist._file.close_on_error: hdulist._file.close() raise OSError('Empty or corrupt FITS file') if not lazy_load_hdus or kwargs.get('checksum') is True: # Go ahead and load all HDUs while hdulist._read_next_hdu(): pass # initialize/reset attributes to be used in "update/append" mode hdulist._resize = False hdulist._truncate = False return hdulist def _try_while_unread_hdus(self, func, *args, **kwargs): """ Attempt an operation that accesses an HDU by index/name that can fail if not all HDUs have been read yet. Keep reading HDUs until the operation succeeds or there are no more HDUs to read. """ while True: try: return func(*args, **kwargs) except Exception: if self._read_next_hdu(): continue else: raise def _read_next_hdu(self): """ Lazily load a single HDU from the fileobj or data string the `HDUList` was opened from, unless no further HDUs are found. Returns True if a new HDU was loaded, or False otherwise. 
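# Illustrative sketch (not part of this module) of the signature test performed
# in _readfrom() above: the first 80-byte card must begin with the FITS
# signature, and a relaxed regular expression is used only to tell a malformed
# SIMPLE card apart from a file that is not FITS at all.  The card is hand-built.
import re

_signature = b'SIMPLE  =' + b' ' * 20 + b'T'     # the 30-byte FITS signature
_card = _signature + b' ' * 50                   # a well-formed 80-byte card

_strict = (_card[:29] == _signature[:-1] and _card[29:30] in (b'T', b'F'))
_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", _card) is not None
assert _strict and _relaxed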
""" if self._read_all: return False saved_compression_enabled = compressed.COMPRESSION_ENABLED fileobj, data, kwargs = self._file, self._data, self._open_kwargs if fileobj is not None and fileobj.closed: return False try: self._in_read_next_hdu = True if ('disable_image_compression' in kwargs and kwargs['disable_image_compression']): compressed.COMPRESSION_ENABLED = False # read all HDUs try: if fileobj is not None: try: # Make sure we're back to the end of the last read # HDU if len(self) > 0: last = self[len(self) - 1] if last._data_offset is not None: offset = last._data_offset + last._data_size fileobj.seek(offset, os.SEEK_SET) hdu = _BaseHDU.readfrom(fileobj, **kwargs) except EOFError: self._read_all = True return False except OSError: # Close the file: see # https://github.com/astropy/astropy/issues/6168 # if self._file.close_on_error: self._file.close() if fileobj.writeonly: self._read_all = True return False else: raise else: if not data: self._read_all = True return False hdu = _BaseHDU.fromstring(data, **kwargs) self._data = data[hdu._data_offset + hdu._data_size:] super().append(hdu) if len(self) == 1: # Check for an extension HDU and update the EXTEND # keyword of the primary HDU accordingly self.update_extend() hdu._new = False if 'checksum' in kwargs: hdu._output_checksum = kwargs['checksum'] # check in the case there is extra space after the last HDU or # corrupted HDU except (VerifyError, ValueError) as exc: warnings.warn( 'Error validating header for HDU #{} (note: Astropy ' 'uses zero-based indexing).\n{}\n' 'There may be extra bytes after the last HDU or the ' 'file is corrupted.'.format( len(self), indent(str(exc))), VerifyWarning) del exc self._read_all = True return False finally: compressed.COMPRESSION_ENABLED = saved_compression_enabled self._in_read_next_hdu = False return True def _verify(self, option='warn'): errs = _ErrList([], unit='HDU') # the first (0th) element must be a primary HDU if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \ (not isinstance(self[0], _NonstandardHDU)): err_text = "HDUList's 0th element is not a primary HDU." fix_text = 'Fixed by inserting one as 0th HDU.' def fix(self=self): self.insert(0, PrimaryHDU()) err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) errs.append(err) if len(self) > 1 and ('EXTEND' not in self[0].header or self[0].header['EXTEND'] is not True): err_text = ('Primary HDU does not contain an EXTEND keyword ' 'equal to T even though there are extension HDUs.') fix_text = 'Fixed by inserting or updating the EXTEND keyword.' def fix(header=self[0].header): naxis = header['NAXIS'] if naxis == 0: after = 'NAXIS' else: after = 'NAXIS' + str(naxis) header.set('EXTEND', value=True, after=after) errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # each element calls their own verify for idx, hdu in enumerate(self): if idx > 0 and (not isinstance(hdu, ExtensionHDU)): err_text = f"HDUList's element {str(idx)} is not an extension HDU." 
err = self.run_option(option, err_text=err_text, fixable=False) errs.append(err) else: result = hdu._verify(option) if result: errs.append(result) return errs def _flush_update(self): """Implements flushing changes to a file in update mode.""" for hdu in self: # Need to all _prewriteto() for each HDU first to determine if # resizing will be necessary hdu._prewriteto(checksum=hdu._output_checksum, inplace=True) try: self._wasresized() # if the HDUList is resized, need to write out the entire contents of # the hdulist to the file. if self._resize or self._file.compression: self._flush_resize() else: # if not resized, update in place for hdu in self: hdu._writeto(self._file, inplace=True) # reset the modification attributes after updating for hdu in self: hdu._header._modified = False finally: for hdu in self: hdu._postwriteto() def _flush_resize(self): """ Implements flushing changes in update mode when parts of one or more HDU need to be resized. """ old_name = self._file.name old_memmap = self._file.memmap name = _tmp_name(old_name) if not self._file.file_like: old_mode = os.stat(old_name).st_mode # The underlying file is an actual file object. The HDUList is # resized, so we need to write it to a tmp file, delete the # original file, and rename the tmp file to the original file. if self._file.compression == 'gzip': new_file = gzip.GzipFile(name, mode='ab+') elif self._file.compression == 'bzip2': if not HAS_BZ2: raise ModuleNotFoundError( "This Python installation does not provide the bz2 module.") new_file = bz2.BZ2File(name, mode='w') else: new_file = name with self.fromfile(new_file, mode='append') as hdulist: for hdu in self: hdu._writeto(hdulist._file, inplace=True, copy=True) if sys.platform.startswith('win'): # Collect a list of open mmaps to the data; this well be # used later. See below. mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data) for idx, hdu in enumerate(self) if hdu._has_data] hdulist._file.close() self._file.close() if sys.platform.startswith('win'): # Close all open mmaps to the data. This is only necessary on # Windows, which will not allow a file to be renamed or deleted # until all handles to that file have been closed. for idx, mmap, arr in mmaps: if mmap is not None: mmap.close() os.remove(self._file.name) # reopen the renamed new file with "update" mode os.rename(name, old_name) os.chmod(old_name, old_mode) if isinstance(new_file, gzip.GzipFile): old_file = gzip.GzipFile(old_name, mode='rb+') else: old_file = old_name ffo = _File(old_file, mode='update', memmap=old_memmap) self._file = ffo for hdu in self: # Need to update the _file attribute and close any open mmaps # on each HDU if hdu._has_data and _get_array_mmap(hdu.data) is not None: del hdu.data hdu._file = ffo if sys.platform.startswith('win'): # On Windows, all the original data mmaps were closed above. # However, it's possible that the user still has references to # the old data which would no longer work (possibly even cause # a segfault if they try to access it). This replaces the # buffers used by the original arrays with the buffers of mmap # arrays created from the new file. This seems to work, but # it's a flaming hack and carries no guarantees that it won't # lead to odd behavior in practice. Better to just not keep # references to data from files that had to be resized upon # flushing (on Windows--again, this is no problem on Linux). 
for idx, mmap, arr in mmaps: if mmap is not None: # https://github.com/numpy/numpy/issues/8628 with warnings.catch_warnings(): warnings.simplefilter('ignore', category=DeprecationWarning) arr.data = self[idx].data.data del mmaps # Just to be sure else: # The underlying file is not a file object, it is a file like # object. We can't write out to a file, we must update the file # like object in place. To do this, we write out to a temporary # file, then delete the contents in our file like object, then # write the contents of the temporary file to the now empty file # like object. self.writeto(name) hdulist = self.fromfile(name) ffo = self._file ffo.truncate(0) ffo.seek(0) for hdu in hdulist: hdu._writeto(ffo, inplace=True, copy=True) # Close the temporary file and delete it. hdulist.close() os.remove(hdulist._file.name) # reset the resize attributes after updating self._resize = False self._truncate = False for hdu in self: hdu._header._modified = False hdu._new = False hdu._file = ffo def _wasresized(self, verbose=False): """ Determine if any changes to the HDUList will require a file resize when flushing the file. Side effect of setting the objects _resize attribute. """ if not self._resize: # determine if any of the HDU is resized for hdu in self: # Header: nbytes = len(str(hdu._header)) if nbytes != (hdu._data_offset - hdu._header_offset): self._resize = True self._truncate = False if verbose: print('One or more header is resized.') break # Data: if not hdu._has_data: continue nbytes = hdu.size nbytes = nbytes + _pad_length(nbytes) if nbytes != hdu._data_size: self._resize = True self._truncate = False if verbose: print('One or more data area is resized.') break if self._truncate: try: self._file.truncate(hdu._data_offset + hdu._data_size) except OSError: self._resize = True self._truncate = False return self._resize
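
# A minimal usage sketch of the update-mode machinery implemented above
# (``_verify``, ``_flush_update``, ``_flush_resize`` and ``_wasresized``).
# Illustrative only and not part of the public API: the file name is a
# placeholder and this helper is never called by the library itself.
def _example_update_flush(filename='example.fits'):
    from astropy.io import fits
    import numpy as np

    with fits.open(filename, mode='update') as hdul:
        # verify with option='fix' exercises the checks in _verify() above
        # (primary HDU present, EXTEND keyword consistent with extensions).
        hdul.verify('fix')

        # A small header change usually fits in the existing header block,
        # so flush() can update the file in place (_wasresized() stays False).
        hdul[0].header['OBSERVER'] = 'example'
        hdul.flush()

        # Appending a new HDU grows the file, so the flush performed when the
        # context manager closes goes through the _flush_resize() path and
        # rewrites the file via a temporary copy.
        hdul.append(fits.ImageHDU(data=np.arange(10)))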
# Licensed under a 3-clause BSD style license - see PYFITS.rst import contextlib import copy import gc import pickle import re import sys import warnings import pytest import numpy as np from numpy import char as chararray try: import objgraph HAVE_OBJGRAPH = True except ImportError: HAVE_OBJGRAPH = False from astropy.io import fits from astropy.table import Table from astropy.units import UnitsWarning, Unit, UnrecognizedUnit from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1 from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning from astropy.io.fits.column import ColumnAttribute, Delayed, NUMPY2FITS from astropy.io.fits.util import decode_ascii from astropy.io.fits.verify import VerifyError from . import FitsTestCase def comparefloats(a, b): """ Compare two float scalars or arrays and see if they are consistent Consistency is determined ensuring the difference is less than the expected amount. Return True if consistent, False if any differences. """ aa = a bb = b # compute expected precision if aa.dtype.name == 'float32' or bb.dtype.name == 'float32': precision = 0.000001 else: precision = 0.0000000000000001 precision = 0.00001 # until precision problem is fixed in astropy.io.fits diff = np.absolute(aa - bb) mask0 = aa == 0 masknz = aa != 0. if np.any(mask0): if diff[mask0].max() != 0.: return False if np.any(masknz): if (diff[masknz] / np.absolute(aa[masknz])).max() > precision: return False return True def comparerecords(a, b): """ Compare two record arrays Does this field by field, using approximation testing for float columns (Complex not yet handled.) Column names not compared, but column types and sizes are. """ nfieldsa = len(a.dtype.names) nfieldsb = len(b.dtype.names) if nfieldsa != nfieldsb: print("number of fields don't match") return False for i in range(nfieldsa): fielda = a.field(i) fieldb = b.field(i) if fielda.dtype.char == 'S': fielda = decode_ascii(fielda) if fieldb.dtype.char == 'S': fieldb = decode_ascii(fieldb) if (not isinstance(fielda, type(fieldb)) and not isinstance(fieldb, type(fielda))): print("type(fielda): ", type(fielda), " fielda: ", fielda) print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb) print(f'field {i} type differs') return False if len(fielda) and isinstance(fielda[0], np.floating): if not comparefloats(fielda, fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print(f'field {i} differs') return False elif (isinstance(fielda, fits.column._VLF) or isinstance(fieldb, fits.column._VLF)): for row in range(len(fielda)): if np.any(fielda[row] != fieldb[row]): print(f'fielda[{row}]: {fielda[row]}') print(f'fieldb[{row}]: {fieldb[row]}') print(f'field {i} differs in row {row}') else: if np.any(fielda != fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print(f'field {i} differs') return False return True def _assert_attr_col(new_tbhdu, tbhdu): """ Helper function to compare column attributes """ # Double check that the headers are equivalent assert tbhdu.columns.names == new_tbhdu.columns.names attrs = [k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)] for name in tbhdu.columns.names: col = tbhdu.columns[name] new_col = new_tbhdu.columns[name] for attr in attrs: if getattr(col, attr) and getattr(new_col, attr): assert getattr(col, attr) == getattr(new_col, attr) class TestTableFunctions(FitsTestCase): def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU is copied 
when used to initialize new HDU. This is like the test of the same name in test_image, but tests this for tables as well. """ ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()]) thdr = ifd[1].header thdr['FILENAME'] = 'labq01i3q_rawtag.fits' thdu = fits.BinTableHDU(header=thdr) ofd = fits.HDUList(thdu) ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits' # Original header should be unchanged assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits' def test_open(self): # open some existing FITS files: tt = fits.open(self.data('tb.fits')) fd = fits.open(self.data('test0.fits')) # create some local arrays a1 = chararray.array(['abc', 'def', 'xx']) r1 = np.array([11., 12., 13.], dtype=np.float32) # create a table from scratch, using a mixture of columns from existing # tables and locally created arrays: # first, create individual column definitions c1 = fits.Column(name='abc', format='3A', array=a1) c2 = fits.Column(name='def', format='E', array=r1) a3 = np.array([3, 4, 5], dtype='i2') c3 = fits.Column(name='xyz', format='I', array=a3) a4 = np.array([1, 2, 3], dtype='i2') c4 = fits.Column(name='t1', format='I', array=a4) a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8') c5 = fits.Column(name='t2', format='C', array=a5) # Note that X format must be two-D array a6 = np.array([[0], [1], [0]], dtype=np.uint8) c6 = fits.Column(name='t3', format='X', array=a6) a7 = np.array([101, 102, 103], dtype='i4') c7 = fits.Column(name='t4', format='J', array=a7) a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1], [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8) c8 = fits.Column(name='t5', format='11X', array=a8) # second, create a column-definitions object for all columns in a table x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8]) tbhdu = fits.BinTableHDU.from_columns(x) # another way to create a table is by using existing table's # information: x2 = fits.ColDefs(tt[1]) t2 = fits.BinTableHDU.from_columns(x2, nrows=2) ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4') assert comparerecords(t2.data, ra) # the table HDU's data is a subclass of a record array, so we can # access one row like this: assert tbhdu.data[1][0] == a1[1] assert tbhdu.data[1][1] == r1[1] assert tbhdu.data[1][2] == a3[1] assert tbhdu.data[1][3] == a4[1] assert tbhdu.data[1][4] == a5[1] assert (tbhdu.data[1][5] == a6[1].view('bool')).all() assert tbhdu.data[1][6] == a7[1] assert (tbhdu.data[1][7] == a8[1]).all() # and a column like this: assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']" # An alternative way to create a column-definitions object is from an # existing table. 
_ = fits.ColDefs(tt[1]) # now we write out the newly created table HDU to a FITS file: fout = fits.HDUList(fits.PrimaryHDU()) fout.append(tbhdu) fout.writeto(self.temp('tableout1.fits'), overwrite=True) with fits.open(self.temp('tableout1.fits')) as f2: temp = f2[1].data.field(7) assert (temp[0] == [True, True, False, True, False, True, True, True, False, False, True]).all() # An alternative way to create an output table FITS file: fout2 = fits.open(self.temp('tableout2.fits'), 'append') fout2.append(fd[0]) fout2.append(tbhdu) fout2.close() tt.close() fd.close() def test_binary_table(self): # binary table: t = fits.open(self.data('tb.fits')) assert t[1].header['tform1'] == '1J' info = {'name': ['c1', 'c2', 'c3', 'c4'], 'format': ['1J', '3A', '1E', '1L'], 'unit': ['', '', '', ''], 'null': [-2147483647, '', '', ''], 'bscale': ['', '', 3, ''], 'bzero': ['', '', 0.4, ''], 'disp': ['I11', 'A3', 'G15.7', 'L6'], 'start': ['', '', '', ''], 'dim': ['', '', '', ''], 'coord_inc': ['', '', '', ''], 'coord_type': ['', '', '', ''], 'coord_unit': ['', '', '', ''], 'coord_ref_point': ['', '', '', ''], 'coord_ref_value': ['', '', '', ''], 'time_ref_pos': ['', '', '', '']} assert t[1].columns.info(output=False) == info ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4') assert comparerecords(t[1].data, ra[:2]) # Change scaled field and scale back to the original array t[1].data.field('c4')[0] = 1 t[1].data._scale_back() assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]' # look at data column-wise assert (t[1].data.field(0) == np.array([1, 2])).all() # When there are scaled columns, the raw data are in data._parent t.close() def test_ascii_table(self): # ASCII table a = fits.open(self.data('ascii.fits')) ra1 = np.rec.array([ (10.123000144958496, 37), (5.1999998092651367, 23), (15.609999656677246, 17), (0.0, 0), (345.0, 345)], names='c1, c2') assert comparerecords(a[1].data, ra1) # Test slicing a2 = a[1].data[2:][2:] ra2 = np.rec.array([(345.0, 345)], names='c1, c2') assert comparerecords(a2, ra2) assert (a2.field(1) == np.array([345])).all() ra3 = np.rec.array([ (10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345) ], names='c1, c2') assert comparerecords(a[1].data[::2], ra3) # Test Start Column a1 = chararray.array(['abcd', 'def']) r1 = np.array([11., 12.]) c1 = fits.Column(name='abc', format='A3', start=19, array=a1) c2 = fits.Column(name='def', format='E', start=3, array=r1) c3 = fits.Column(name='t1', format='I', array=[91, 92, 93]) hdu = fits.TableHDU.from_columns([c2, c1, c3]) assert (dict(hdu.data.dtype.fields) == {'abc': (np.dtype('|S3'), 18), 'def': (np.dtype('|S15'), 2), 't1': (np.dtype('|S10'), 21)}) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() # Test Scaling r1 = np.array([11., 12.]) c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3, bzero=0.6) hdu = fits.TableHDU.from_columns([c2]) hdu.writeto(self.temp('toto.fits'), overwrite=True) with open(self.temp('toto.fits')) as f: assert '4.95652173913043548D+00' in f.read() with fits.open(self.temp('toto.fits')) as hdul: assert comparerecords(hdu.data, hdul[1].data) # Test Integer precision according to width c1 = fits.Column(name='t2', format='I2', array=[91, 92, 93]) c2 = fits.Column(name='t4', format='I5', array=[91, 92, 93]) c3 = fits.Column(name='t8', format='I10', array=[91, 92, 93]) hdu = fits.TableHDU.from_columns([c1, c2, c3]) assert 
c1.array.dtype == np.int16 assert c2.array.dtype == np.int32 assert c3.array.dtype == np.int64 hdu.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as hdul: assert comparerecords(hdu.data, hdul[1].data) a.close() def test_endianness(self): x = np.ndarray((1,), dtype=object) channelsIn = np.array([3], dtype='uint8') x[0] = channelsIn col = fits.Column(name="Channels", format="PB()", array=x) cols = fits.ColDefs([col]) tbhdu = fits.BinTableHDU.from_columns(cols) tbhdu.name = "RFI" tbhdu.writeto(self.temp('testendian.fits'), overwrite=True) hduL = fits.open(self.temp('testendian.fits')) rfiHDU = hduL['RFI'] data = rfiHDU.data channelsOut = data.field('Channels')[0] assert (channelsIn == channelsOut).all() hduL.close() def test_column_endianness(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77 (Astropy doesn't preserve byte order of non-native order column arrays) """ a = [1., 2., 3., 4.] a1 = np.array(a, dtype='<f8') a2 = np.array(a, dtype='>f8') col1 = fits.Column(name='a', format='D', array=a1) col2 = fits.Column(name='b', format='D', array=a2) cols = fits.ColDefs([col1, col2]) tbhdu = fits.BinTableHDU.from_columns(cols) assert (tbhdu.data['a'] == a1).all() assert (tbhdu.data['b'] == a2).all() # Double check that the array is converted to the correct byte-order # for FITS (big-endian). tbhdu.writeto(self.temp('testendian.fits'), overwrite=True) with fits.open(self.temp('testendian.fits')) as hdul: assert (hdul[1].data['a'] == a2).all() assert (hdul[1].data['b'] == a2).all() def test_recarray_to_bintablehdu(self): bright = np.rec.array( [(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], formats='int16,a20,float32,a10', names='order,name,mag,Sp') hdu = fits.BinTableHDU(bright) assert comparerecords(hdu.data, bright) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) assert comparerecords(bright, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu(self): desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'], 'formats': ['int', 'S20', 'float32', 'S10']}) a = np.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu_with_unicode(self): desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'], 'formats': ['int', 'U20', 'float32', 'U10']}) a = np.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_new_table_from_recarray(self): bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], formats='int16,a20,float64,a10', names='order,name,mag,Sp') hdu = fits.TableHDU.from_columns(bright, nrows=2) # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns.columns[0].array)) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])) # Ensure I can change the value of one data element and it effects # all of the others. hdu.data[0][0] = 213 assert hdu.data[0][0] == 213 assert hdu.data._coldefs._arrays[0][0] == 213 assert hdu.data._coldefs.columns[0].array[0] == 213 assert hdu.columns._arrays[0][0] == 213 assert hdu.columns.columns[0].array[0] == 213 hdu.data._coldefs._arrays[0][0] = 100 assert hdu.data[0][0] == 100 assert hdu.data._coldefs._arrays[0][0] == 100 assert hdu.data._coldefs.columns[0].array[0] == 100 assert hdu.columns._arrays[0][0] == 100 assert hdu.columns.columns[0].array[0] == 100 hdu.data._coldefs.columns[0].array[0] = 500 assert hdu.data[0][0] == 500 assert hdu.data._coldefs._arrays[0][0] == 500 assert hdu.data._coldefs.columns[0].array[0] == 500 assert hdu.columns._arrays[0][0] == 500 assert hdu.columns.columns[0].array[0] == 500 hdu.columns._arrays[0][0] = 600 assert hdu.data[0][0] == 600 assert hdu.data._coldefs._arrays[0][0] == 600 assert hdu.data._coldefs.columns[0].array[0] == 600 assert hdu.columns._arrays[0][0] == 600 assert hdu.columns.columns[0].array[0] == 600 hdu.columns.columns[0].array[0] = 800 assert hdu.data[0][0] == 800 assert hdu.data._coldefs._arrays[0][0] == 800 assert hdu.data._coldefs.columns[0].array[0] == 800 assert hdu.columns._arrays[0][0] == 800 assert hdu.columns.columns[0].array[0] == 800 assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdu.data[0][1] == 'Serius' assert hdu.data[1][1] == 'Canopys' assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all() assert hdu.data[0][3] == 'A1V' assert hdu.data[1][3] == 'F0Ib' hdu.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as hdul: assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdul[1].data[0][1] == 'Serius' assert hdul[1].data[1][1] == 'Canopys' assert (hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all() assert hdul[1].data[0][3] == 'A1V' assert hdul[1].data[1][3] == 'F0Ib' del hdul hdu = fits.BinTableHDU.from_columns(bright, nrows=2) tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib')], formats='int16,a20,float64,a10', names='order,name,mag,Sp') assert comparerecords(hdu.data, tmp) hdu.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as hdul: assert comparerecords(hdu.data, hdul[1].data) def test_new_fitsrec(self): """ Tests creating a new FITS_rec object from a multi-field ndarray. """ with fits.open(self.data('tb.fits')) as h: data = h[1].data new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype) appended = np.append(data, new_data).view(fits.FITS_rec) assert repr(appended).startswith('FITS_rec(') # This test used to check the entire string representation of FITS_rec, # but that has problems between different numpy versions. 
Instead just # check that the FITS_rec was created, and we'll let subsequent tests # worry about checking values and such def test_appending_a_column(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) counts = np.array([412, 434, 408, 417]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table2.fits')) # Append the rows of table 2 after the rows of table 1 # The column definitions are assumed to be the same # Open the two files we want to append t1 = fits.open(self.temp('table1.fits')) t2 = fits.open(self.temp('table2.fits')) # Get the number of rows in the table from the first file nrows1 = t1[1].data.shape[0] # Get the total number of rows in the resulting appended table nrows = t1[1].data.shape[0] + t2[1].data.shape[0] assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array) # Create a new table that consists of the data from the first table # but has enough space in the ndarray to hold the data from both tables hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows) # For each column in the tables append the data from table 2 after the # data from table 1. 
for i in range(len(t1[1].columns)): hdu.data.field(i)[nrows1:] = t2[1].data.field(i) hdu.writeto(self.temp('newtable.fits')) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]', '')] assert fits.info(self.temp('newtable.fits'), output=False) == info z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True), ('NGC2', 334, '', z, False), ('NGC3', 308, '', z, True), ('NCG4', 317, '', z, True), ('NGC5', 412, '', z, False), ('NGC6', 434, '', z, True), ('NGC7', 408, '', z, False), ('NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 # Same verification from the file hdul = fits.open(self.temp('newtable.fits')) hdu = hdul[1] hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_adding_a_column(self): # Tests adding a column to a table. 
counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4]) tbhdu = fits.BinTableHDU.from_columns(coldefs) assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum'] coldefs1 = coldefs + c5 tbhdu1 = fits.BinTableHDU.from_columns(coldefs1) assert tbhdu1.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag'] z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True), ('NGC2', 334, '', z, False), ('NGC3', 308, '', z, True), ('NCG4', 317, '', z, True)], formats='a10,u4,a10,5f4,l') assert comparerecords(tbhdu1.data, array) def test_adding_a_column_inplace(self): # Tests adding a column to a table. counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4]) tbhdu = fits.BinTableHDU.from_columns(coldefs) assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum'] tbhdu.columns.add_col(c5) assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag'] z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True), ('NGC2', 334, '', z, False), ('NGC3', 308, '', z, True), ('NCG4', 317, '', z, True)], formats='a10,u4,a10,5f4,l') assert comparerecords(tbhdu.data, array) def test_adding_a_column_to_file(self): hdul = fits.open(self.data('table.fits')) tbhdu = hdul[1] col = fits.Column(name='a', array=np.array([1, 2]), format='K') tbhdu.columns.add_col(col) assert tbhdu.columns.names == ['target', 'V_mag', 'a'] array = np.rec.array( [('NGC1001', 11.1, 1), ('NGC1002', 12.3, 2), ('NGC1003', 15.2, 0)], formats='a20,f4,i8') assert comparerecords(tbhdu.data, array) hdul.close() def test_removing_a_column_inplace(self): # Tests adding a column to a table. 
counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag'] tbhdu.columns.del_col('flag') assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum'] z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z), ('NGC2', 334, '', z), ('NGC3', 308, '', z), ('NCG4', 317, '', z)], formats='a10,u4,a10,5f4') assert comparerecords(tbhdu.data, array) tbhdu.columns.del_col('counts') tbhdu.columns.del_col('notes') assert tbhdu.columns.names == ['target', 'spectrum'] array = np.rec.array( [('NGC1', z), ('NGC2', z), ('NGC3', z), ('NCG4', z)], formats='a10,5f4') assert comparerecords(tbhdu.data, array) def test_removing_a_column_from_file(self): hdul = fits.open(self.data('table.fits')) tbhdu = hdul[1] tbhdu.columns.del_col('V_mag') assert tbhdu.columns.names == ['target'] array = np.rec.array( [('NGC1001', ), ('NGC1002', ), ('NGC1003', )], formats='a20') assert comparerecords(tbhdu.data, array) hdul.close() def test_merge_tables(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) counts = np.array([412, 434, 408, 417]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target1', format='10A', array=names) c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes1', format='A10') c4 = fits.Column(name='spectrum1', format='5E') c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table2.fits')) # Merge the columns of table 2 after the columns of table 1 # The column names are assumed to be different # Open the two files we want to append t1 = fits.open(self.temp('table1.fits')) t2 = fits.open(self.temp('table2.fits')) hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns) z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False), ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True), ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False), ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) hdu.writeto(self.temp('newtable.fits')) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 
assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 30, '4R x 10C', '[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')] assert fits.info(self.temp('newtable.fits'), output=False) == info hdul = fits.open(self.temp('newtable.fits')) hdu = hdul[1] assert (hdu.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag', 'target1', 'counts1', 'notes1', 'spectrum1', 'flag1']) z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False), ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True), ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False), ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) # Same verification from the file hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_modify_column_attributes(self): """Regression test for https://github.com/astropy/astropy/issues/996 This just tests one particular use case, but it should apply pretty well to other similar cases. 
""" NULLS = {'a': 2, 'b': 'b', 'c': 2.3} data = np.array(list(zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'], [2.3, 4.5, 6.7, 8.9])), dtype=[('a', int), ('b', 'S1'), ('c', float)]) b = fits.BinTableHDU(data=data) for col in b.columns: col.null = NULLS[col.name] b.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul: header = hdul[1].header assert header['TNULL1'] == 2 assert header['TNULL2'] == 'b' assert header['TNULL3'] == 2.3 def test_multidimension_table_from_numpy_rec_columns(self): """Regression test for https://github.com/astropy/astropy/issues/5280 and https://github.com/astropy/astropy/issues/5287 multidimentional tables can now be written with the correct TDIM. Author: Stephen Bailey. """ dtype = [ ('x', (str, 5)), # 1D column of 5-character strings ('y', (str, 3), (4,)), # 2D column; each row is four 3-char strings ] data = np.zeros(2, dtype=dtype) data['x'] = ['abcde', 'xyz'] data['y'][0] = ['A', 'BC', 'DEF', '123'] data['y'][1] = ['X', 'YZ', 'PQR', '999'] table = Table(data) # Test convenience functions io.fits.writeto / getdata fits.writeto(self.temp('test.fits'), data) dx = fits.getdata(self.temp('test.fits')) assert data['x'].dtype == dx['x'].dtype assert data['y'].dtype == dx['y'].dtype assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x']) assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y']) # Test fits.BinTableHDU(data) and avoid convenience functions hdu0 = fits.PrimaryHDU() hdu1 = fits.BinTableHDU(data) hx = fits.HDUList([hdu0, hdu1]) hx.writeto(self.temp('test2.fits')) fx = fits.open(self.temp('test2.fits')) dx = fx[1].data fx.close() assert data['x'].dtype == dx['x'].dtype assert data['y'].dtype == dx['y'].dtype assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x']) assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y']) # Test Table write and read table.write(self.temp('test3.fits')) tx = Table.read(self.temp('test3.fits'), character_as_bytes=False) assert table['x'].dtype == tx['x'].dtype assert table['y'].dtype == tx['y'].dtype assert np.all(table['x'] == tx['x']), 'x: {} != {}'.format(table['x'], tx['x']) assert np.all(table['y'] == tx['y']), 'y: {} != {}'.format(table['y'], tx['y']) def test_mask_array(self): t = fits.open(self.data('table.fits')) tbdata = t[1].data mask = tbdata.field('V_mag') > 12 newtbdata = tbdata[mask] hdu = fits.BinTableHDU(newtbdata) hdu.writeto(self.temp('newtable.fits')) hdul = fits.open(self.temp('newtable.fits')) # match to a regex rather than a specific string. 
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]" assert re.match(expect, str(hdu.data)) assert re.match(expect, str(hdul[1].data)) t.close() hdul.close() def test_slice_a_row(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) row = t1[1].data[2] assert row['counts'] == 308 a, b, c = row[1:4] assert a == counts[2] assert b == '' assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() row['counts'] = 310 assert row['counts'] == 310 row[1] = 315 assert row['counts'] == 315 assert row[1:4]['counts'] == 315 pytest.raises(KeyError, lambda r: r[1:4]['flag'], row) row[1:4]['counts'] = 300 assert row[1:4]['counts'] == 300 assert row['counts'] == 300 row[1:4][0] = 400 assert row[1:4]['counts'] == 400 row[1:4]['counts'] = 300 assert row[1:4]['counts'] == 300 # Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59 row[1:4][::-1][-1] = 500 assert row[1:4]['counts'] == 500 row[1:4:2][0] = 300 assert row[1:4]['counts'] == 300 pytest.raises(KeyError, lambda r: r[1:4]['flag'], row) assert row[1:4].field(0) == 300 assert row[1:4].field('counts') == 300 pytest.raises(KeyError, row[1:4].field, 'flag') row[1:4].setfield('counts', 500) assert row[1:4].field(0) == 500 pytest.raises(KeyError, row[1:4].setfield, 'flag', False) assert t1[1].data._coldefs._arrays[1][2] == 500 assert t1[1].data._coldefs.columns[1].array[2] == 500 assert t1[1].columns._arrays[1][2] == 500 assert t1[1].columns.columns[1].array[2] == 500 assert t1[1].data[2][1] == 500 t1.close() def test_fits_record_len(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) assert len(t1[1].data[0]) == 5 assert len(t1[1].data[0][0:4]) == 4 assert len(t1[1].data[0][0:5]) == 5 assert len(t1[1].data[0][0:6]) == 5 assert len(t1[1].data[0][0:7]) == 5 assert len(t1[1].data[0][1:4]) == 3 assert len(t1[1].data[0][1:5]) == 4 assert len(t1[1].data[0][1:6]) == 4 assert len(t1[1].data[0][1:7]) == 4 t1.close() def test_add_data_by_rows(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) c1 = fits.Column(name='target', format='10A') c2 = fits.Column(name='counts', format='J', unit='DN') c3 = fits.Column(name='notes', 
format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L') coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5) # Test assigning data to a tables row using a FITS_record tbhdu.data[0] = tbhdu1.data[0] tbhdu.data[4] = tbhdu1.data[3] # Test assigning data to a tables row using a tuple tbhdu.data[2] = ('NGC1', 312, 'A Note', np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32), True) # Test assigning data to a tables row using a list tbhdu.data[3] = ['JIM1', '33', 'A Note', np.array([1., 2., 3., 4., 5.], dtype=np.float32), True] # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.data._coldefs._arrays[0])) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns.columns[0].array)) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])) assert tbhdu.data[0][1] == 312 assert tbhdu.data._coldefs._arrays[1][0] == 312 assert tbhdu.data._coldefs.columns[1].array[0] == 312 assert tbhdu.columns._arrays[1][0] == 312 assert tbhdu.columns.columns[1].array[0] == 312 assert tbhdu.columns.columns[0].array[0] == 'NGC1' assert tbhdu.columns.columns[2].array[0] == '' assert (tbhdu.columns.columns[3].array[0] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu.columns.columns[4].array[0] == True # noqa assert tbhdu.data[3][1] == 33 assert tbhdu.data._coldefs._arrays[1][3] == 33 assert tbhdu.data._coldefs.columns[1].array[3] == 33 assert tbhdu.columns._arrays[1][3] == 33 assert tbhdu.columns.columns[1].array[3] == 33 assert tbhdu.columns.columns[0].array[3] == 'JIM1' assert tbhdu.columns.columns[2].array[3] == 'A Note' assert (tbhdu.columns.columns[3].array[3] == np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all() assert tbhdu.columns.columns[4].array[3] == True # noqa def test_assign_multiple_rows_to_table(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) counts = np.array([112, 134, 108, 117]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32) tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9) # Assign the 4 rows from the second table to rows 5 thru 8 of the # new table. Note that the last row of the new table will still be # initialized to the default values. tbhdu2.data[4:] = tbhdu.data # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.data._coldefs._arrays[0])) assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.columns.columns[0].array)) assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.columns._arrays[0])) assert tbhdu2.data[0][1] == 312 assert tbhdu2.data._coldefs._arrays[1][0] == 312 assert tbhdu2.data._coldefs.columns[1].array[0] == 312 assert tbhdu2.columns._arrays[1][0] == 312 assert tbhdu2.columns.columns[1].array[0] == 312 assert tbhdu2.columns.columns[0].array[0] == 'NGC1' assert tbhdu2.columns.columns[2].array[0] == '' assert (tbhdu2.columns.columns[3].array[0] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[0] == True # noqa assert tbhdu2.data[4][1] == 112 assert tbhdu2.data._coldefs._arrays[1][4] == 112 assert tbhdu2.data._coldefs.columns[1].array[4] == 112 assert tbhdu2.columns._arrays[1][4] == 112 assert tbhdu2.columns.columns[1].array[4] == 112 assert tbhdu2.columns.columns[0].array[4] == 'NGC5' assert tbhdu2.columns.columns[2].array[4] == '' assert (tbhdu2.columns.columns[3].array[4] == np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[4] == False # noqa assert tbhdu2.columns.columns[1].array[8] == 0 assert tbhdu2.columns.columns[0].array[8] == '' assert tbhdu2.columns.columns[2].array[8] == '' assert (tbhdu2.columns.columns[3].array[8] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[8] == False # noqa def test_verify_data_references(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) # Verify that original ColDefs object has independent Column # objects. assert id(coldefs.columns[0]) != id(c1) # Verify that original ColDefs object has independent ndarray # objects. assert id(coldefs.columns[0].array) != id(names) # Verify that original ColDefs object references the same data # object as the original Column object. assert id(coldefs.columns[0].array) == id(c1.array) assert id(coldefs.columns[0].array) == id(coldefs._arrays[0]) # Verify new HDU has an independent ColDefs object. assert id(coldefs) != id(tbhdu.columns) # Verify new HDU has independent Column objects. assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0]) # Verify new HDU has independent ndarray objects. assert (id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)) # Verify that both ColDefs objects in the HDU reference the same # Coldefs object. assert id(tbhdu.columns) == id(tbhdu.data._coldefs) # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.data._coldefs._arrays[0])) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns.columns[0].array)) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_ndarray(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray)) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.data._coldefs._arrays[0])) assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.columns.columns[0].array)) assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.columns._arrays[0])) # Ensure I can change the value of one data element and it effects # all of the others. 
tbhdu1.data[0][1] = 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 tbhdu1.data._coldefs.columns[1].array[0] = 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 tbhdu1.columns._arrays[1][0] = 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 tbhdu1.columns.columns[1].array[0] = 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 tbhdu1.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_fits_rec(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][1] = 213 assert tbhdu.data[0][1] == 213 assert tbhdu.data._coldefs._arrays[1][0] == 213 assert tbhdu.data._coldefs.columns[1].array[0] == 213 assert tbhdu.columns._arrays[1][0] == 213 assert tbhdu.columns.columns[1].array[0] == 213 tbhdu.data._coldefs._arrays[1][0] = 100 assert tbhdu.data[0][1] == 100 assert 
tbhdu.data._coldefs._arrays[1][0] == 100 assert tbhdu.data._coldefs.columns[1].array[0] == 100 assert tbhdu.columns._arrays[1][0] == 100 assert tbhdu.columns.columns[1].array[0] == 100 tbhdu.data._coldefs.columns[1].array[0] = 500 assert tbhdu.data[0][1] == 500 assert tbhdu.data._coldefs._arrays[1][0] == 500 assert tbhdu.data._coldefs.columns[1].array[0] == 500 assert tbhdu.columns._arrays[1][0] == 500 assert tbhdu.columns.columns[1].array[0] == 500 tbhdu.columns._arrays[1][0] = 600 assert tbhdu.data[0][1] == 600 assert tbhdu.data._coldefs._arrays[1][0] == 600 assert tbhdu.data._coldefs.columns[1].array[0] == 600 assert tbhdu.columns._arrays[1][0] == 600 assert tbhdu.columns.columns[1].array[0] == 600 tbhdu.columns.columns[1].array[0] = 800 assert tbhdu.data[0][1] == 800 assert tbhdu.data._coldefs._arrays[1][0] == 800 assert tbhdu.data._coldefs.columns[1].array[0] == 800 assert tbhdu.columns._arrays[1][0] == 800 assert tbhdu.columns.columns[1].array[0] == 800 tbhdu.columns.columns[1].array[0] = 312 tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 1 fr = t1[1].data assert t1[1].data[0][1] == 1 assert t1[1].data._coldefs._arrays[1][0] == 1 assert t1[1].data._coldefs.columns[1].array[0] == 1 assert t1[1].columns._arrays[1][0] == 1 assert t1[1].columns.columns[1].array[0] == 1 assert fr[0][1] == 1 assert fr._coldefs._arrays[1][0] == 1 assert fr._coldefs.columns[1].array[0] == 1 fr._coldefs.columns[1].array[0] = 312 tbhdu1 = fits.BinTableHDU.from_columns(fr) i = 0 for row in tbhdu1.data: for j in range(len(row)): if isinstance(row[j], np.ndarray): assert (row[j] == tbhdu.data[i][j]).all() else: assert row[j] == tbhdu.data[i][j] i = i + 1 tbhdu1.data[0][1] = 213 assert t1[1].data[0][1] == 312 assert t1[1].data._coldefs._arrays[1][0] == 312 assert t1[1].data._coldefs.columns[1].array[0] == 312 assert t1[1].columns._arrays[1][0] == 312 assert t1[1].columns.columns[1].array[0] == 312 assert fr[0][1] == 312 assert fr._coldefs._arrays[1][0] == 312 assert fr._coldefs.columns[1].array[0] == 312 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 t1[1].data[0][1] = 10 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 666 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 666 assert tbhdu1.data._coldefs._arrays[1][0] == 666 assert tbhdu1.data._coldefs.columns[1].array[0] == 666 assert tbhdu1.columns._arrays[1][0] == 666 assert tbhdu1.columns.columns[1].array[0] == 666 t1.close() def test_bin_table_hdu_constructor(self): counts = np.array([312, 334, 308, 
317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) hdu = fits.BinTableHDU(tbhdu1.data) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns.columns[0].array)) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])) # Verify that the references in the original HDU are the same as the # references in the new HDU. assert (id(tbhdu1.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) # Verify that a change in the new HDU is reflected in both the new # and original HDU. hdu.data[0][1] = 213 assert hdu.data[0][1] == 213 assert hdu.data._coldefs._arrays[1][0] == 213 assert hdu.data._coldefs.columns[1].array[0] == 213 assert hdu.columns._arrays[1][0] == 213 assert hdu.columns.columns[1].array[0] == 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 hdu.data._coldefs._arrays[1][0] = 100 assert hdu.data[0][1] == 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 hdu.data._coldefs.columns[1].array[0] = 500 assert hdu.data[0][1] == 500 assert hdu.data._coldefs._arrays[1][0] == 500 assert hdu.data._coldefs.columns[1].array[0] == 500 assert hdu.columns._arrays[1][0] == 500 assert hdu.columns.columns[1].array[0] == 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 hdu.columns._arrays[1][0] = 600 assert hdu.data[0][1] == 600 assert hdu.data._coldefs._arrays[1][0] == 600 assert hdu.data._coldefs.columns[1].array[0] == 600 assert hdu.columns._arrays[1][0] == 600 assert hdu.columns.columns[1].array[0] == 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 hdu.columns.columns[1].array[0] = 800 assert hdu.data[0][1] == 800 assert hdu.data._coldefs._arrays[1][0] == 800 assert hdu.data._coldefs.columns[1].array[0] == 800 assert hdu.columns._arrays[1][0] == 800 assert hdu.columns.columns[1].array[0] == 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 def test_constructor_name_arg(self): """testConstructorNameArg Passing name='...' 
to the BinTableHDU and TableHDU constructors should set the .name attribute and 'EXTNAME' header keyword, and override any name in an existing 'EXTNAME' value. """ for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.name == '' assert 'EXTNAME' not in hdu.header hdu.name = 'FOO' assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # Passing name to constructor hdu = hducls(name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # And overriding a header with a different extname hdr = fits.Header() hdr['EXTNAME'] = 'EVENTS' hdu = hducls(header=hdr, name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' def test_constructor_ver_arg(self): for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.ver == 1 assert 'EXTVER' not in hdu.header hdu.ver = 2 assert hdu.ver == 2 assert hdu.header['EXTVER'] == 2 # Passing name to constructor hdu = hducls(ver=3) assert hdu.ver == 3 assert hdu.header['EXTVER'] == 3 # And overriding a header with a different extver hdr = fits.Header() hdr['EXTVER'] = 4 hdu = hducls(header=hdr, ver=5) assert hdu.ver == 5 assert hdu.header['EXTVER'] == 5 def test_unicode_colname(self): """ Regression test for https://github.com/astropy/astropy/issues/5204 "Handle unicode FITS BinTable column names on Python 2" """ col = fits.Column(name='spam', format='E', array=[42.]) # This used to raise a TypeError, now it works fits.BinTableHDU.from_columns([col]) def test_bin_table_with_logical_array(self): c1 = fits.Column(name='flag', format='2L', array=[[True, False], [False, True]]) coldefs = fits.ColDefs([c1]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) assert (tbhdu1.data.field('flag')[0] == np.array([True, False], dtype=bool)).all() assert (tbhdu1.data.field('flag')[1] == np.array([False, True], dtype=bool)).all() tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data) assert (tbhdu.data.field('flag')[0] == np.array([True, False], dtype=bool)).all() assert (tbhdu.data.field('flag')[1] == np.array([False, True], dtype=bool)).all() def test_fits_rec_column_access(self): tbdata = fits.getdata(self.data('table.fits')) assert (tbdata.V_mag == tbdata.field('V_mag')).all() assert (tbdata.V_mag == tbdata['V_mag']).all() # Table with scaling (c3) and tnull (c1) tbdata = fits.getdata(self.data('tb.fits')) for col in ('c1', 'c2', 'c3', 'c4'): data = getattr(tbdata, col) assert (data == tbdata.field(col)).all() assert (data == tbdata[col]).all() # ascii table tbdata = fits.getdata(self.data('ascii.fits')) for col in ('a', 'b'): data = getattr(tbdata, col) assert (data == tbdata.field(col)).all() assert (data == tbdata[col]).all() # with VLA column col1 = fits.Column(name='x', format='PI()', array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_)) hdu = fits.BinTableHDU.from_columns([col1]) assert type(hdu.data['x']) == type(hdu.data.x) # noqa assert (hdu.data['x'][0] == hdu.data.x[0]).all() assert (hdu.data['x'][1] == hdu.data.x[1]).all() def test_table_with_zero_width_column(self): hdul = fits.open(self.data('zerowidth.fits')) tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM' assert 'ORBPARM' in tbhdu.columns.names # The ORBPARM column should not be in the data, though the data should # be readable assert 'ORBPARM' in tbhdu.data.names assert 'ORBPARM' in tbhdu.data.dtype.names # Verify that some of the data columns are still correctly accessible # by name assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16' assert 
comparefloats( tbhdu.data[0]['STABXYZ'], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64)) assert tbhdu.data[0]['NOSTA'] == 1 assert tbhdu.data[0]['MNTSTA'] == 0 assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT' assert comparefloats( tbhdu.data[-1]['STABXYZ'], np.array([0.0, 0.0, 0.0], dtype=np.float64)) assert tbhdu.data[-1]['NOSTA'] == 29 assert tbhdu.data[-1]['MNTSTA'] == 0 hdul.writeto(self.temp('newtable.fits')) hdul.close() hdul = fits.open(self.temp('newtable.fits')) tbhdu = hdul[2] # Verify that the previous tests still hold after writing assert 'ORBPARM' in tbhdu.columns.names assert 'ORBPARM' in tbhdu.data.names assert 'ORBPARM' in tbhdu.data.dtype.names assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16' assert comparefloats( tbhdu.data[0]['STABXYZ'], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64)) assert tbhdu.data[0]['NOSTA'] == 1 assert tbhdu.data[0]['MNTSTA'] == 0 assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT' assert comparefloats( tbhdu.data[-1]['STABXYZ'], np.array([0.0, 0.0, 0.0], dtype=np.float64)) assert tbhdu.data[-1]['NOSTA'] == 29 assert tbhdu.data[-1]['MNTSTA'] == 0 hdul.close() def test_string_column_padding(self): a = ['img1', 'img2', 'img3a', 'p'] s = 'img1\x00\x00\x00\x00\x00\x00' \ 'img2\x00\x00\x00\x00\x00\x00' \ 'img3a\x00\x00\x00\x00\x00' \ 'p\x00\x00\x00\x00\x00\x00\x00\x00\x00' acol = fits.Column(name='MEMNAME', format='A10', array=chararray.array(a)) ahdu = fits.BinTableHDU.from_columns([acol]) assert ahdu.data.tobytes().decode('raw-unicode-escape') == s ahdu.writeto(self.temp('newtable.fits')) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s assert (hdul[1].data['MEMNAME'] == a).all() del hdul ahdu = fits.TableHDU.from_columns([acol]) ahdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert (hdul[1].data.tobytes().decode('raw-unicode-escape') == s.replace('\x00', ' ')) assert (hdul[1].data['MEMNAME'] == a).all() ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy()) del hdul # Now serialize once more as a binary table; padding bytes should # revert to zeroes ahdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s assert (hdul[1].data['MEMNAME'] == a).all() def test_multi_dimensional_columns(self): """ Tests the multidimensional column implementation with both numeric arrays and string arrays. 
""" data = np.rec.array( [([0, 1, 2, 3, 4, 5], 'row1' * 2), ([6, 7, 8, 9, 0, 1], 'row2' * 2), ([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8') thdu = fits.BinTableHDU.from_columns(data) thdu.writeto(self.temp('newtable.fits')) with fits.open(self.temp('newtable.fits'), mode='update') as hdul: # Modify the TDIM fields to my own specification hdul[1].header['TDIM1'] = '(2,3)' hdul[1].header['TDIM2'] = '(4,2)' with fits.open(self.temp('newtable.fits')) as hdul: thdu = hdul[1] c1 = thdu.data.field(0) c2 = thdu.data.field(1) assert c1.shape == (3, 3, 2) assert c2.shape == (3, 2) assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [0, 1]], [[2, 3], [4, 5], [6, 7]]])).all() assert (c2 == np.array([['row1', 'row1'], ['row2', 'row2'], ['row3', 'row3']])).all() del c1 del c2 del thdu del hdul # Test setting the TDIMn header based on the column data data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)]) data['x'] = 1, 2, 3 data['s'] = 'ok' fits.writeto(self.temp('newtable.fits'), data, overwrite=True) t = fits.getdata(self.temp('newtable.fits')) assert t.field(1).dtype.str[-1] == '5' assert t.field(1).shape == (3, 4) # Like the previous test, but with an extra dimension (a bit more # complicated) data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))]) data['x'] = 1, 2, 3 data['s'] = 'ok' del t fits.writeto(self.temp('newtable.fits'), data, overwrite=True) t = fits.getdata(self.temp('newtable.fits')) assert t.field(1).dtype.str[-1] == '5' assert t.field(1).shape == (3, 4, 3) def test_oned_array_single_element(self): # a table with rows that are 1d arrays of a single value data = np.array([(1, ), (2, )], dtype=([('x', 'i4', (1, ))])) thdu = fits.BinTableHDU.from_columns(data) thdu.writeto(self.temp('onedtable.fits')) with fits.open(self.temp('onedtable.fits')) as hdul: thdu = hdul[1] c = thdu.data.field(0) assert c.shape == (2, 1) assert thdu.header['TDIM1'] == '(1)' def test_bin_table_init_from_string_array_column(self): """ Tests two ways of creating a new `BinTableHDU` from a column of string arrays. This tests for a couple different regressions, and ensures that both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work equivalently. Some of this is redundant with the following test, but checks some subtly different cases. """ data = [[b'abcd', b'efgh'], [b'ijkl', b'mnop'], [b'qrst', b'uvwx']] arr = np.array([(data,), (data,), (data,), (data,), (data,)], dtype=[('S', '(3, 2)S4')]) tbhdu1 = fits.BinTableHDU(data=arr) def test_dims_and_roundtrip(tbhdu): assert tbhdu.data['S'].shape == (5, 3, 2) assert tbhdu.data['S'].dtype.str.endswith('U4') tbhdu.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul: tbhdu2 = hdul[1] assert tbhdu2.header['TDIM1'] == '(4,2,3)' assert tbhdu2.data['S'].shape == (5, 3, 2) assert tbhdu.data['S'].dtype.str.endswith('U4') assert np.all(tbhdu2.data['S'] == tbhdu.data['S']) test_dims_and_roundtrip(tbhdu1) tbhdu2 = fits.BinTableHDU.from_columns(arr) test_dims_and_roundtrip(tbhdu2) def test_columns_with_truncating_tdim(self): """ According to the FITS standard (section 7.3.2): If the number of elements in the array implied by the TDIMn is less than the allocated size of the ar- ray in the FITS file, then the unused trailing elements should be interpreted as containing undefined fill values. 
*deep sigh* What this means is if a column has a repeat count larger than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)', but TFORM1 = 6I), then instead of this being an outright error we are to take the first 4 elements as implied by the TDIM and ignore the additional two trailing elements. """ # It's hard to even successfully create a table like this. I think # it *should* be difficult, but once created it should at least be # possible to read. arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']] arr2 = [1, 2, 3, 4, 5] arr = np.array([(arr1, arr2), (arr1, arr2)], dtype=[('a', '(3, 2)S2'), ('b', '5i8')]) tbhdu = fits.BinTableHDU(data=arr) tbhdu.writeto(self.temp('test.fits')) with open(self.temp('test.fits'), 'rb') as f: raw_bytes = f.read() # Artificially truncate TDIM in the header; this seems to be the # easiest way to do this while getting around Astropy's insistence on the # data and header matching perfectly; again, we have no interest in # making it possible to write files in this format, only read them with open(self.temp('test.fits'), 'wb') as f: f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)')) with fits.open(self.temp('test.fits')) as hdul: tbhdu2 = hdul[1] assert tbhdu2.header['TDIM1'] == '(2,2,2)' assert tbhdu2.header['TFORM1'] == '12A' for row in tbhdu2.data: assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']]) assert np.all(row['b'] == [1, 2, 3, 4, 5]) def test_string_array_round_trip(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201""" data = [['abc', 'def', 'ghi'], ['jkl', 'mno', 'pqr'], ['stu', 'vwx', 'yz ']] recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3']) t = fits.BinTableHDU(data=recarr) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert 'TDIM1' in h[1].header assert h[1].header['TDIM1'] == '(3,3,3)' assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert (h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], 'ascii')).all() with fits.open(self.temp('test.fits')) as h: # Access the data; I think this is necessary to exhibit the bug # reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201 h[1].data[:] h.writeto(self.temp('test2.fits')) with fits.open(self.temp('test2.fits')) as h: assert 'TDIM1' in h[1].header assert h[1].header['TDIM1'] == '(3,3,3)' assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert (h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], 'ascii')).all() def test_new_table_with_nd_column(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/3 """ arra = np.array(['a', 'b'], dtype='|S1') arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2') arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) cols = [ fits.Column(name='str', format='1A', array=arra), fits.Column(name='strarray', format='4A', dim='(2,2)', array=arrb), fits.Column(name='intarray', format='4I', dim='(2, 2)', array=arrc) ] hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: # Need to force string arrays to byte arrays in order to compare # correctly on Python 3 assert (h[1].data['str'].encode('ascii') == arra).all() assert (h[1].data['strarray'].encode('ascii') == arrb).all() assert (h[1].data['intarray'] == arrc).all() def test_mismatched_tform_and_tdim(self): """Normally the product of the dimensions listed in a TDIMn keyword must be less than or equal to the repeat count in the TFORMn keyword. 
This tests that this works if less than (treating the trailing bytes as unspecified fill values per the FITS standard) and fails if the dimensions specified by TDIMn are greater than the repeat count. """ arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]) cols = [fits.Column(name='a', format='20I', dim='(2,2)', array=arra), fits.Column(name='b', format='4I', dim='(2,2)', array=arrb)] # The first column has the mismatched repeat count hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[1].header['TFORM1'] == '20I' assert h[1].header['TFORM2'] == '4I' assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)' assert (h[1].data['a'] == arra).all() assert (h[1].data['b'] == arrb).all() assert h[1].data.itemsize == 48 # 16-bits times 24 # If dims is more than the repeat count in the format specifier raise # an error pytest.raises(VerifyError, fits.Column, name='a', format='2I', dim='(2,2)', array=arra) def test_tdim_of_size_one(self): """Regression test for https://github.com/astropy/astropy/pull/3580""" with fits.open(self.data('tdim.fits')) as hdulist: assert hdulist[1].data['V_mag'].shape == (3, 1, 1) def test_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52""" with fits.open(self.data('table.fits')) as f: data = f[1].data targets = data.field('target') s = data[:] assert (s.field('target') == targets).all() for n in range(len(targets) + 2): s = data[:n] assert (s.field('target') == targets[:n]).all() s = data[n:] assert (s.field('target') == targets[n:]).all() s = data[::2] assert (s.field('target') == targets[::2]).all() s = data[::-1] assert (s.field('target') == targets[::-1]).all() def test_array_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55""" with fits.open(self.data('table.fits')) as f: data = f[1].data s1 = data[data['target'] == 'NGC1001'] s2 = data[np.where(data['target'] == 'NGC1001')] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) def test_array_broadcasting(self): """ Regression test for https://github.com/spacetelescope/PyFITS/pull/48 """ with fits.open(self.data('table.fits')) as hdu: data = hdu[1].data data['V_mag'] = 0 assert np.all(data['V_mag'] == 0) data['V_mag'] = 1 assert np.all(data['V_mag'] == 1) for container in (list, tuple, np.array): data['V_mag'] = container([1, 2, 3]) assert np.array_equal(data['V_mag'], np.array([1, 2, 3])) def test_array_slicing_readonly(self): """ Like test_array_slicing but with the file opened in 'readonly' mode. Regression test for a crash when slicing readonly memmap'd tables. """ with fits.open(self.data('table.fits'), mode='readonly') as f: data = f[1].data s1 = data[data['target'] == 'NGC1001'] s2 = data[np.where(data['target'] == 'NGC1001')] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) @pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits']) def test_dump_load_round_trip(self, tablename): """ A simple test of the dump/load methods; dump the data, column, and header files and try to reload the table from them. 
""" with fits.open(self.data(tablename)) as hdul: tbhdu = hdul[1] datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) _assert_attr_col(new_tbhdu, hdul[1]) def test_dump_load_array_colums(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/22 Ensures that a table containing a multi-value array column can be dumped and loaded successfully. """ data = np.rec.array([('a', [1, 2, 3, 4], 0.1), ('b', [5, 6, 7, 8], 0.2)], formats='a1,4i4,f8') tbhdu = fits.BinTableHDU.from_columns(data) datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) assert str(tbhdu.header) == str(new_tbhdu.header) def test_load_guess_format(self): """ Tests loading a table dump with no supplied coldefs or header, so that the table format has to be guessed at. There is of course no exact science to this; the table that's produced simply uses sensible guesses for that format. Ideally this should never have to be used. """ # Create a table containing a variety of data types. a0 = np.array([False, True, False], dtype=bool) c0 = fits.Column(name='c0', format='L', array=a0) # Format X currently not supported by the format # a1 = np.array([[0], [1], [0]], dtype=np.uint8) # c1 = fits.Column(name='c1', format='X', array=a1) a2 = np.array([1, 128, 255], dtype=np.uint8) c2 = fits.Column(name='c2', format='B', array=a2) a3 = np.array([-30000, 1, 256], dtype=np.int16) c3 = fits.Column(name='c3', format='I', array=a3) a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32) c4 = fits.Column(name='c4', format='J', array=a4) a5 = np.array(['a', 'abc', 'ab']) c5 = fits.Column(name='c5', format='A3', array=a5) a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64) c6 = fits.Column(name='c6', format='D', array=a6) a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128) c7 = fits.Column(name='c7', format='M', array=a7) a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32) c8 = fits.Column(name='c8', format='PJ()', array=a8) tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8]) datafile = self.temp('data.txt') tbhdu.dump(datafile) new_tbhdu = fits.BinTableHDU.load(datafile) # In this particular case the record data at least should be equivalent assert comparerecords(tbhdu.data, new_tbhdu.data) def test_attribute_field_shadowing(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86 Numpy recarray objects have a poorly-considered feature of allowing field access by attribute lookup. However, if a field name coincides with an existing attribute/method of the array, the existing name takes presence (making the attribute-based field lookup completely unreliable in general cases). This ensures that any FITS_rec attributes still work correctly even when there is a field with the same name as that attribute. 
""" c1 = fits.Column(name='names', format='I', array=[1]) c2 = fits.Column(name='formats', format='I', array=[2]) c3 = fits.Column(name='other', format='I', array=[3]) t = fits.BinTableHDU.from_columns([c1, c2, c3]) assert t.data.names == ['names', 'formats', 'other'] assert t.data.formats == ['I'] * 3 assert (t.data['names'] == [1]).all() assert (t.data['formats'] == [2]).all() assert (t.data.other == [3]).all() def test_table_from_bool_fields(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113 Tests creating a table from a recarray containing numpy.bool columns. """ array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1') thdu = fits.BinTableHDU.from_columns(array) assert thdu.columns.formats == ['L', 'L'] assert comparerecords(thdu.data, array) # Test round trip thdu.writeto(self.temp('table.fits')) data = fits.getdata(self.temp('table.fits'), ext=1) assert thdu.columns.formats == ['L', 'L'] assert comparerecords(data, array) def test_table_from_bool_fields2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215 Tests the case where a multi-field ndarray (not a recarray) containing a bool field is used to initialize a `BinTableHDU`. """ arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')]) hdu = fits.BinTableHDU(data=arr) assert (hdu.data['a'] == arr['a']).all() def test_bool_column_update(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139""" c1 = fits.Column('F1', 'L', array=[True, False]) c2 = fits.Column('F2', 'L', array=[False, True]) thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2])) thdu.writeto(self.temp('table.fits')) with fits.open(self.temp('table.fits'), mode='update') as hdul: hdul[1].data['F1'][1] = True hdul[1].data['F2'][0] = True with fits.open(self.temp('table.fits')) as hdul: assert (hdul[1].data['F1'] == [True, True]).all() assert (hdul[1].data['F2'] == [True, True]).all() def test_missing_tnull(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197""" c = fits.Column('F1', 'A3', null='---', array=np.array(['1.0', '2.0', '---', '3.0']), ascii=True) table = fits.TableHDU.from_columns([c]) table.writeto(self.temp('test.fits')) # Now let's delete the TNULL1 keyword, making this essentially # unreadable with fits.open(self.temp('test.fits'), mode='update') as h: h[1].header['TFORM1'] = 'E3' del h[1].header['TNULL1'] with fits.open(self.temp('test.fits')) as h: pytest.raises(ValueError, lambda: h[1].data['F1']) try: with fits.open(self.temp('test.fits')) as h: h[1].data['F1'] except ValueError as e: assert str(e).endswith( "the header may be missing the necessary TNULL1 " "keyword or the table contains invalid data") def test_blank_field_zero(self): """Regression test for https://github.com/astropy/astropy/issues/5134 Blank values in numerical columns of ASCII tables should be replaced with zeros, so they can be loaded into numpy arrays. When a TNULL value is set and there are blank fields not equal to that value, they should be replaced with zeros. """ # Test an integer column with blank string as null nullval1 = ' ' c1 = fits.Column('F1', format='I8', null=nullval1, array=np.array([0, 1, 2, 3, 4]), ascii=True) table = fits.TableHDU.from_columns([c1]) table.writeto(self.temp('ascii_null.fits')) # Replace the 1st col, 3rd row, with a null field. 
with open(self.temp('ascii_null.fits'), mode='r+') as h: nulled = h.read().replace('2 ', ' ') h.seek(0) h.write(nulled) with fits.open(self.temp('ascii_null.fits'), memmap=True) as f: assert f[1].data[2][0] == 0 # Test a float column with a null value set and blank fields. nullval2 = 'NaN' c2 = fits.Column('F1', format='F12.8', null=nullval2, array=np.array([1.0, 2.0, 3.0, 4.0]), ascii=True) table = fits.TableHDU.from_columns([c2]) table.writeto(self.temp('ascii_null2.fits')) # Replace the 1st col, 3rd row, with a null field. with open(self.temp('ascii_null2.fits'), mode='r+') as h: nulled = h.read().replace('3.00000000', ' ') h.seek(0) h.write(nulled) with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f: # (Currently it should evaluate to 0.0, but if a TODO in fitsrec is # completed, then it should evaluate to NaN.) assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0]) def test_column_array_type_mismatch(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218""" arr = [-99] * 20 col = fits.Column('mag', format='E', array=arr) assert (arr == col.array).all() def test_table_none(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/27 """ with fits.open(self.data('tb.fits')) as h: h[1].data h[1].data = None assert isinstance(h[1].data, fits.FITS_rec) assert len(h[1].data) == 0 h[1].writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[1].header['NAXIS'] == 2 assert h[1].header['NAXIS1'] == 12 assert h[1].header['NAXIS2'] == 0 assert isinstance(h[1].data, fits.FITS_rec) assert len(h[1].data) == 0 def test_unncessary_table_load(self): """Test unnecessary parsing and processing of FITS tables when writing directly from one FITS file to a new file without first reading the data for user manipulation. In other words, it should be possible to do a direct copy of the raw data without unnecessary processing of the data. """ with fits.open(self.data('table.fits')) as h: h[1].writeto(self.temp('test.fits')) # Since this was a direct copy the h[1].data attribute should not have # even been accessed (since this means the data was read and parsed) assert 'data' not in h[1].__dict__ with fits.open(self.data('table.fits')) as h1: with fits.open(self.temp('test.fits')) as h2: assert str(h1[1].header) == str(h2[1].header) assert comparerecords(h1[1].data, h2[1].data) def test_table_from_columns_of_other_table(self): """Tests a rare corner case where the columns of an existing table are used to create a new table with the new_table function. In this specific case, however, the existing table's data has not been read yet, so new_table has to get at it through the Delayed proxy. Note: Although this previously tested new_table it now uses BinTableHDU.from_columns directly, around which new_table is a mere wrapper. """ hdul = fits.open(self.data('table.fits')) # Make sure the column array is in fact delayed... assert isinstance(hdul[1].columns._arrays[0], Delayed) # Create a new table... t = fits.BinTableHDU.from_columns(hdul[1].columns) # The original columns should no longer be delayed... 
assert not isinstance(hdul[1].columns._arrays[0], Delayed) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul2: assert comparerecords(hdul[1].data, hdul2[1].data) hdul.close() def test_bintable_to_asciitable(self): """Tests initializing a TableHDU with the data from a BinTableHDU.""" with fits.open(self.data('tb.fits')) as hdul: tbdata = hdul[1].data tbhdu = fits.TableHDU(data=tbdata) tbhdu.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul2: tbdata2 = hdul2[1].data assert np.all(tbdata['c1'] == tbdata2['c1']) assert np.all(tbdata['c2'] == tbdata2['c2']) # c3 gets converted from float32 to float64 when writing # test.fits, so cast to float32 before testing that the correct # value is retrieved assert np.all(tbdata['c3'].astype(np.float32) == tbdata2['c3'].astype(np.float32)) # c4 is a boolean column in the original table; we want ASCII # columns to convert these to columns of 'T'/'F' strings assert np.all(np.where(tbdata['c4'], 'T', 'F') == tbdata2['c4']) def test_pickle(self): """ Regression test for https://github.com/astropy/astropy/issues/1597 Tests for pickling FITS_rec objects """ # open existing FITS tables (images pickle by default, no test needed): with fits.open(self.data('tb.fits')) as btb: # Test column array is delayed and can pickle assert isinstance(btb[1].columns._arrays[0], Delayed) btb_pd = pickle.dumps(btb[1].data) btb_pl = pickle.loads(btb_pd) # It should not be delayed any more assert not isinstance(btb[1].columns._arrays[0], Delayed) assert comparerecords(btb_pl, btb[1].data) with fits.open(self.data('ascii.fits')) as asc: asc_pd = pickle.dumps(asc[1].data) asc_pl = pickle.loads(asc_pd) assert comparerecords(asc_pl, asc[1].data) with fits.open(self.data('random_groups.fits')) as rgr: rgr_pd = pickle.dumps(rgr[0].data) rgr_pl = pickle.loads(rgr_pd) assert comparerecords(rgr_pl, rgr[0].data) with fits.open(self.data('zerowidth.fits')) as zwc: # Doesn't pickle zero-width (_phanotm) column 'ORBPARM' zwc_pd = pickle.dumps(zwc[2].data) zwc_pl = pickle.loads(zwc_pd) with pytest.warns(UserWarning, match='Field 2 has a repeat count of 0'): assert comparerecords(zwc_pl, zwc[2].data) def test_zero_length_table(self): array = np.array([], dtype=[ ('a', 'i8'), ('b', 'S64'), ('c', ('i4', (3, 2)))]) hdu = fits.BinTableHDU(array) assert hdu.header['NAXIS1'] == 96 assert hdu.header['NAXIS2'] == 0 assert hdu.header['TDIM3'] == '(2,3)' field = hdu.data.field(1) assert field.shape == (0,) def test_dim_column_byte_order_mismatch(self): """ When creating a table column with non-trivial TDIMn, and big-endian array data read from an existing FITS file, the data should not be unnecessarily byteswapped. Regression test for https://github.com/astropy/astropy/issues/3561 """ data = fits.getdata(self.data('random_groups.fits'))['DATA'] col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)', format='1152E') thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert np.all(hdul[1].data['TEST'] == data) def test_fits_rec_from_existing(self): """ Tests creating a `FITS_rec` object with `FITS_rec.from_columns` from an existing `FITS_rec` object read from a FITS file. This ensures that the per-column arrays are updated properly. 
Regression test for https://github.com/spacetelescope/PyFITS/issues/99 """ # The use case that revealed this problem was trying to create a new # table from an existing table, but with additional rows so that we can # append data from a second table (with the same column structure) data1 = fits.getdata(self.data('tb.fits')) data2 = fits.getdata(self.data('tb.fits')) nrows = len(data1) + len(data2) merged = fits.FITS_rec.from_columns(data1, nrows=nrows) merged[len(data1):] = data2 mask = merged['c1'] > 1 masked = merged[mask] # The test table only has two rows, only the second of which is > 1 for # the 'c1' column assert comparerecords(data1[1:], masked[:1]) assert comparerecords(data1[1:], masked[1:]) # Double check that the original data1 table hasn't been affected by # its use in creating the "merged" table assert comparerecords(data1, fits.getdata(self.data('tb.fits'))) def test_update_string_column_inplace(self): """ Regression test for https://github.com/astropy/astropy/issues/4452 Ensure that changes to values in a string column are saved when a file is opened in ``mode='update'``. """ data = np.array([('abc',)], dtype=[('a', 'S3')]) fits.writeto(self.temp('test.fits'), data) with fits.open(self.temp('test.fits'), mode='update') as hdul: hdul[1].data['a'][0] = 'XYZ' assert hdul[1].data['a'][0] == 'XYZ' with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].data['a'][0] == 'XYZ' # Test update but with a non-trivial TDIMn data = np.array([([['abc', 'def', 'geh'], ['ijk', 'lmn', 'opq']],)], dtype=[('a', ('S3', (2, 3)))]) fits.writeto(self.temp('test2.fits'), data) expected = [['abc', 'def', 'geh'], ['ijk', 'XYZ', 'opq']] with fits.open(self.temp('test2.fits'), mode='update') as hdul: assert hdul[1].header['TDIM1'] == '(3,3,2)' # Note: Previously I wrote data['a'][0][1, 1] to address # the single row. However, this is broken for chararray because # data['a'][0] does *not* return a view of the original array--this # is a bug in chararray though and not a bug in any FITS-specific # code so we'll roll with it for now... # (by the way the bug in question is fixed in newer Numpy versions) hdul[1].data['a'][0, 1, 1] = 'XYZ' assert np.all(hdul[1].data['a'][0] == expected) with fits.open(self.temp('test2.fits')) as hdul: assert hdul[1].header['TDIM1'] == '(3,3,2)' assert np.all(hdul[1].data['a'][0] == expected) @pytest.mark.skipif('not HAVE_OBJGRAPH') def test_reference_leak(self): """Regression test for https://github.com/astropy/astropy/pull/520""" def readfile(filename): with fits.open(filename) as hdul: data = hdul[1].data.copy() for colname in data.dtype.names: data[colname] with _refcounting('FITS_rec'): readfile(self.data('memtest.fits')) @pytest.mark.skipif('not HAVE_OBJGRAPH') @pytest.mark.slow def test_reference_leak2(self, tmpdir): """ Regression test for https://github.com/astropy/astropy/pull/4539 This actually re-runs a small set of tests that I found, during careful testing, exhibited the reference leaks fixed by #4539, but now with reference counting around each test to ensure that the leaks are fixed. 
""" from .test_core import TestCore from .test_connect import TestMultipleHDU t1 = TestCore() t1.setup() try: with _refcounting('FITS_rec'): t1.test_add_del_columns2() finally: t1.teardown() del t1 t2 = self.__class__() for test_name in ['test_recarray_to_bintablehdu', 'test_numpy_ndarray_to_bintablehdu', 'test_new_table_from_recarray', 'test_new_fitsrec']: t2.setup() try: with _refcounting('FITS_rec'): getattr(t2, test_name)() finally: t2.teardown() del t2 t3 = TestMultipleHDU() t3.setup_class() try: with _refcounting('FITS_rec'): t3.test_read(tmpdir) finally: t3.teardown_class() del t3 def test_dump_overwrite(self): with fits.open(self.data('table.fits')) as hdul: tbhdu = hdul[1] datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) msg = (r"File .* already exists\. File .* already exists\. File " r".* already exists\. If you mean to replace the " r"file\(s\) then use the argument 'overwrite=True'\.") with pytest.raises(OSError, match=msg): tbhdu.dump(datafile, cdfile, hfile) tbhdu.dump(datafile, cdfile, hfile, overwrite=True) def test_pseudo_unsigned_ints(self): """ Tests updating a table column containing pseudo-unsigned ints. """ data = np.array([1, 2, 3], dtype=np.uint32) col = fits.Column(name='A', format='1J', bzero=2**31, array=data) thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp('test.fits')) # Test that the file wrote out correctly with fits.open(self.temp('test.fits'), uint=True) as hdul: hdu = hdul[1] assert 'TZERO1' in hdu.header assert hdu.header['TZERO1'] == 2**31 assert hdu.data['A'].dtype == np.dtype('uint32') assert np.all(hdu.data['A'] == data) # Test updating the unsigned int data hdu.data['A'][0] = 99 hdu.writeto(self.temp('test2.fits')) with fits.open(self.temp('test2.fits'), uint=True) as hdul: hdu = hdul[1] assert 'TZERO1' in hdu.header assert hdu.header['TZERO1'] == 2**31 assert hdu.data['A'].dtype == np.dtype('uint32') assert np.all(hdu.data['A'] == [99, 2, 3]) def test_column_with_scaling(self): """Check that a scaled column if correctly saved once it is modified. Regression test for https://github.com/astropy/astropy/issues/6887 """ c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'), format='1I', bscale=1, bzero=32768) S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])]) # Change value in memory S[1].data['c1'][0] = 2 S.writeto(self.temp("a.fits")) assert S[1].data['c1'] == 2 # Read and change value in memory with fits.open(self.temp("a.fits")) as X: X[1].data['c1'][0] = 10 assert X[1].data['c1'][0] == 10 # Write back to file X.writeto(self.temp("b.fits")) # Now check the file with fits.open(self.temp("b.fits")) as hdul: assert hdul[1].data['c1'][0] == 10 def test_ascii_inttypes(self): """ Test correct integer dtypes according to ASCII table field widths. 
Regression for https://github.com/astropy/astropy/issues/9899 """ i08 = np.array([2**3, 2**23, -2**22, 10, 2**23], dtype='i4') i10 = np.array([2**8, 2**31-1, -2**29, 30, 2**31-1], dtype='i8') i20 = np.array([2**16, 2**63-1, -2**63, 40, 2**63-1], dtype='i8') i02 = np.array([2**8, 2**13, -2**9, 50, 2**13], dtype='i2') t0 = Table([i08, i08*2, i10, i20, i02]) t1 = Table.read(self.data('ascii_i4-i20.fits')) assert t1.dtype == t0.dtype assert comparerecords(t1, t0) def test_ascii_floattypes(self): """Test different float formats.""" col1 = fits.Column(name='a', format='D', array=np.array([11.1, 12.2]), ascii=True) col2 = fits.Column(name='b', format='D16', array=np.array([15.5, 16.6]), ascii=True) col3 = fits.Column(name='c', format='D16.7', array=np.array([1.1, 2.2]), ascii=True) hdu = fits.TableHDU.from_columns([col1, col2, col3]) hdu.writeto(self.temp('foo.fits')) with fits.open(self.temp('foo.fits'), memmap=False) as hdul: assert comparerecords(hdul[1].data, hdu.data) @contextlib.contextmanager def _refcounting(type_): """ Perform the body of a with statement with reference counting for the given type (given by class name)--raises an assertion error if there are more unfreed objects of the given type than when we entered the with statement. """ gc.collect() refcount = len(objgraph.by_type(type_)) yield refcount gc.collect() assert len(objgraph.by_type(type_)) <= refcount, \ "More {0!r} objects still in memory than before." class TestVLATables(FitsTestCase): """Tests specific to tables containing variable-length arrays.""" def test_variable_length_columns(self): def test(format_code): col = fits.Column(name='QUAL_SPE', format=format_code, array=[[0] * 1571] * 225) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) hdu_list.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as toto: q = toto[1].data.field('QUAL_SPE') assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all() assert toto[1].columns[0].format.endswith('J(1571)') for code in ('PJ()', 'QJ()'): test(code) def test_extend_variable_length_array(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54""" def test(format_code): arr = [[1] * 10] * 10 col1 = fits.Column(name='TESTVLF', format=format_code, array=arr) col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10) tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15) # This asserts that the normal 'scalar' column's length was extended assert len(tb_hdu.data['TESTSCA']) == 15 # And this asserts that the VLF column was extended in the same manner assert len(tb_hdu.data['TESTVLF']) == 15 # We can't compare the whole array since the _VLF is an array of # objects, but comparing just the edge case rows should suffice assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all() assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all() assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all() assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all() for code in ('PJ()', 'QJ()'): test(code) def test_variable_length_table_format_pd_from_object_array(self): def test(format_code): a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], 'O') acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith('D(2)') for j in range(3): for i in 
range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ('PD()', 'QD()'): test(code) def test_variable_length_table_format_pd_from_list(self): def test(format_code): a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])] acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith('D(2)') for j in range(3): for i in range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ('PD()', 'QD()'): test(code) def test_variable_length_table_format_pa_from_object_array(self): def test(format_code): a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']), np.array(['f'])], 'O') acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].columns[0].format.endswith('A(3)') for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ('PA()', 'QA()'): test(code) def test_variable_length_table_format_pa_from_list(self): def test(format_code): a = ['a', 'ab', 'abc'] acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].columns[0].format.endswith('A(3)') for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ('PA()', 'QA()'): test(code) def test_getdata_vla(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200""" def test(format_code): col = fits.Column(name='QUAL_SPE', format=format_code, array=[np.arange(1572)] * 225) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) hdu_list.writeto(self.temp('toto.fits'), overwrite=True) data = fits.getdata(self.temp('toto.fits')) # Need to compare to the original data row by row since the FITS_rec # returns an array of _VLA objects for row_a, row_b in zip(data['QUAL_SPE'], col.array): assert (row_a == row_b).all() for code in ('PJ()', 'QJ()'): test(code) @pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == 'win32', reason='https://github.com/numpy/numpy/issues/20699') def test_copy_vla(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/47 """ # Make a file containing a couple of VLA tables arr1 = [np.arange(n + 1) for n in range(255)] arr2 = [np.arange(255, 256 + n) for n in range(255)] # A dummy non-VLA column needed to reproduce issue #47 c = fits.Column('test', format='J', array=np.arange(255)) c1 = fits.Column('A', format='PJ', array=arr1) c2 = fits.Column('B', format='PJ', array=arr2) t1 = fits.BinTableHDU.from_columns([c, c1]) t2 = fits.BinTableHDU.from_columns([c, c2]) hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2]) hdul.writeto(self.temp('test.fits'), overwrite=True) # Just test that the test file wrote out correctly with fits.open(self.temp('test.fits')) as h: assert h[1].header['TFORM2'] == 'PJ(255)' assert h[2].header['TFORM2'] == 'PJ(255)' assert comparerecords(h[1].data, t1.data) assert comparerecords(h[2].data, t2.data) # Try copying the second VLA and writing to a new file with fits.open(self.temp('test.fits')) as 
h: new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header) new_hdu.writeto(self.temp('test3.fits')) with fits.open(self.temp('test3.fits')) as h2: assert comparerecords(h2[1].data, t2.data) new_hdul = fits.HDUList([fits.PrimaryHDU()]) new_hdul.writeto(self.temp('test2.fits')) # Open several copies of the test file and append copies of the second # VLA table with fits.open(self.temp('test2.fits'), mode='append') as new_hdul: for _ in range(2): with fits.open(self.temp('test.fits')) as h: new_hdul.append(h[2]) new_hdul.flush() # Test that all the VLA copies wrote correctly with fits.open(self.temp('test2.fits')) as new_hdul: for idx in range(1, 3): assert comparerecords(new_hdul[idx].data, t2.data) def test_vla_with_gap(self): hdul = fits.open(self.data('theap-gap.fits')) data = hdul[1].data assert data.shape == (500,) assert data['i'][497] == 497 assert np.array_equal(data['arr'][497], [0, 1, 2, 3, 4]) hdul.close() def test_tolist(self): col = fits.Column( name='var', format='PI()', array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_)) hdu = fits.BinTableHDU.from_columns([col]) assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]] assert hdu.data['var'].tolist() == [[1, 2, 3], [11, 12]] def test_tolist_from_file(self): filename = self.data('variable_length_table.fits') with fits.open(filename) as hdul: hdu = hdul[1] assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]] assert hdu.data['var'].tolist() == [[45, 56], [11, 12, 13]] @pytest.mark.skipif('sys.maxsize < 2**32') @pytest.mark.skipif('sys.platform == "win32"') @pytest.mark.hugemem def test_heapsize_P_limit(self): """ Regression test for https://github.com/astropy/astropy/issues/10812 Check if the error is raised when the heap size is bigger than what can be indexed with a 32 bit signed int. """ # a matrix with variable length array elements is created nelem = 2**28 matrix = np.zeros(1, dtype=np.object_) matrix[0] = np.arange(0., float(nelem+1)) col = fits.Column(name='MATRIX', format=f'PD({nelem})', unit='', array=matrix) t = fits.BinTableHDU.from_columns([col]) t.name = 'MATRIX' with pytest.raises(ValueError, match="Please consider using the 'Q' format for your file."): t.writeto(self.temp('matrix.fits')) # These are tests that solely test the Column and ColDefs interfaces and # related functionality without directly involving full tables; currently there # are few of these but I expect there to be more as I improve the test coverage class TestColumnFunctions(FitsTestCase): def test_column_format_interpretation(self): """ Test to ensure that when Numpy-style record formats are passed in to the Column constructor for the format argument, they are recognized so long as it's unambiguous (where "unambiguous" here is questionable since Numpy is case insensitive when parsing the format codes. But their "proper" case is lower-case, so we can accept that. Basically, actually, any key in the NUMPY2FITS dict should be accepted. 
""" for recformat, fitsformat in NUMPY2FITS.items(): c = fits.Column('TEST', np.dtype(recformat)) c.format == fitsformat c = fits.Column('TEST', recformat) c.format == fitsformat c = fits.Column('TEST', fitsformat) c.format == fitsformat # Test a few cases that are ambiguous in that they *are* valid binary # table formats though not ones that are likely to be used, but are # also valid common ASCII table formats c = fits.Column('TEST', 'I4') assert c.format == 'I4' assert c.format.format == 'I' assert c.format.width == 4 c = fits.Column('TEST', 'F15.8') assert c.format == 'F15.8' assert c.format.format == 'F' assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column('TEST', 'E15.8') assert c.format.format == 'E' assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column('TEST', 'D15.8') assert c.format.format == 'D' assert c.format.width == 15 assert c.format.precision == 8 # zero-precision should be allowed as well, for float types # https://github.com/astropy/astropy/issues/3422 c = fits.Column('TEST', 'F10.0') assert c.format.format == 'F' assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column('TEST', 'E10.0') assert c.format.format == 'E' assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column('TEST', 'D10.0') assert c.format.format == 'D' assert c.format.width == 10 assert c.format.precision == 0 # These are a couple cases where the format code is a valid binary # table format, and is not strictly a valid ASCII table format but # could be *interpreted* as one by appending a default width. This # will only happen either when creating an ASCII table or when # explicitly specifying ascii=True when the column is created c = fits.Column('TEST', 'I') assert c.format == 'I' assert c.format.recformat == 'i2' c = fits.Column('TEST', 'I', ascii=True) assert c.format == 'I10' assert c.format.recformat == 'i4' # With specified widths, integer precision should be set appropriately c = fits.Column('TEST', 'I4', ascii=True) assert c.format == 'I4' assert c.format.recformat == 'i2' c = fits.Column('TEST', 'I9', ascii=True) assert c.format == 'I9' assert c.format.recformat == 'i4' c = fits.Column('TEST', 'I12', ascii=True) assert c.format == 'I12' assert c.format.recformat == 'i8' c = fits.Column('TEST', 'E') assert c.format == 'E' assert c.format.recformat == 'f4' c = fits.Column('TEST', 'E', ascii=True) assert c.format == 'E15.7' # F is not a valid binary table format so it should be unambiguously # treated as an ASCII column c = fits.Column('TEST', 'F') assert c.format == 'F16.7' c = fits.Column('TEST', 'D') assert c.format == 'D' assert c.format.recformat == 'f8' c = fits.Column('TEST', 'D', ascii=True) assert c.format == 'D25.17' def test_zero_precision_float_column(self): """ Regression test for https://github.com/astropy/astropy/issues/3422 """ c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3]) # The decimal places will be clipped t = fits.TableHDU.from_columns([c]) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].header['TFORM1'] == 'F5.0' assert hdul[1].data['TEST'].dtype == np.dtype('float64') assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0]) # Check how the raw data looks raw = np.rec.recarray.field(hdul[1].data, 'TEST') assert raw.tobytes() == b' 1. 2. 3.' 
    def test_column_array_type_mismatch(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""

        arr = [-99] * 20
        col = fits.Column('mag', format='E', array=arr)
        assert (arr == col.array).all()

    def test_new_coldefs_with_invalid_seqence(self):
        """Test that a TypeError is raised when a ColDefs is instantiated with
        a sequence of non-Column objects.
        """

        pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])

    def test_coldefs_init_from_array(self):
        """Test that ColDefs._init_from_array works with single-element
        data-types as well as multi-element data-types.
        """

        nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
        col_defs = fits.column.ColDefs(nd_array)
        assert 2**31 == col_defs['A'].bzero
        assert 2**15 == col_defs['B'].bzero

    def test_pickle(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/1597

        Tests for pickling FITS_rec objects
        """

        # open existing FITS tables (images pickle by default, no test needed):
        with fits.open(self.data('tb.fits')) as btb:
            # Test column array is delayed and can pickle
            assert isinstance(btb[1].columns._arrays[0], Delayed)

            btb_pd = pickle.dumps(btb[1].data)
            btb_pl = pickle.loads(btb_pd)

            # It should not be delayed any more
            assert not isinstance(btb[1].columns._arrays[0], Delayed)

            assert comparerecords(btb_pl, btb[1].data)

        with fits.open(self.data('ascii.fits')) as asc:
            asc_pd = pickle.dumps(asc[1].data)
            asc_pl = pickle.loads(asc_pd)
            assert comparerecords(asc_pl, asc[1].data)

        with fits.open(self.data('random_groups.fits')) as rgr:
            rgr_pd = pickle.dumps(rgr[0].data)
            rgr_pl = pickle.loads(rgr_pd)
            assert comparerecords(rgr_pl, rgr[0].data)

        with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
            zwc_pd = pickle.dumps(zwc[2].data)
            zwc_pl = pickle.loads(zwc_pd)
            with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
                                                 r'of 0 in its format code'):
                assert comparerecords(zwc_pl, zwc[2].data)

    def test_column_lookup_by_name(self):
        """Tests that a `ColDefs` can be indexed by column name."""

        a = fits.Column(name='a', format='D')
        b = fits.Column(name='b', format='D')

        cols = fits.ColDefs([a, b])

        assert cols['a'] == cols[0]
        assert cols['b'] == cols[1]

    def test_column_attribute_change_after_removal(self):
        """
        This is a test of the column attribute change notification system.

        After a column has been removed from a table (but other references
        are kept to that same column) changes to that column's attributes
        should not trigger a notification on the table it was removed from.
        """

        # One way we can check this is to ensure there are no further changes
        # to the header
        table = fits.BinTableHDU.from_columns([
            fits.Column('a', format='D'),
            fits.Column('b', format='D')])

        b = table.columns['b']

        table.columns.del_col('b')
        assert table.data.dtype.names == ('a',)

        b.name = 'HELLO'

        assert b.name == 'HELLO'
        assert 'TTYPE2' not in table.header
        assert table.header['TTYPE1'] == 'a'
        assert table.columns.names == ['a']

        with pytest.raises(KeyError):
            table.columns['b']

        # Make sure updates to the remaining column still work
        table.columns.change_name('a', 'GOODBYE')
        with pytest.raises(KeyError):
            table.columns['a']

        assert table.columns['GOODBYE'].name == 'GOODBYE'
        assert table.data.dtype.names == ('GOODBYE',)
        assert table.columns.names == ['GOODBYE']
        assert table.data.columns.names == ['GOODBYE']

        table.columns['GOODBYE'].name = 'foo'
        with pytest.raises(KeyError):
            table.columns['GOODBYE']

        assert table.columns['foo'].name == 'foo'
        assert table.data.dtype.names == ('foo',)
        assert table.columns.names == ['foo']
        assert table.data.columns.names == ['foo']

    def test_x_column_deepcopy(self):
        """
        Regression test for https://github.com/astropy/astropy/pull/4514

        Tests that columns with the X (bit array) format can be deep-copied.
        """

        c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
        c2 = copy.deepcopy(c)
        assert c2.name == c.name
        assert c2.format == c.format
        assert np.all(c2.array == c.array)

    def test_p_column_deepcopy(self):
        """
        Regression test for https://github.com/astropy/astropy/pull/4514

        Tests that columns with the P/Q formats (variable length arrays) can
        be deep-copied.
        """

        c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
        c2 = copy.deepcopy(c)
        assert c2.name == c.name
        assert c2.format == c.format
        assert np.all(c2.array[0] == c.array[0])
        assert np.all(c2.array[1] == c.array[1])

        c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
        c4 = copy.deepcopy(c3)
        assert c4.name == c3.name
        assert c4.format == c3.format
        assert np.all(c4.array[0] == c3.array[0])
        assert np.all(c4.array[1] == c3.array[1])

    def test_column_verify_keywords(self):
        """
        Test that the keyword arguments used to initialize a Column,
        specifically those that are typically read from a FITS header (so
        excluding array), are verified to have a valid value.
        """

        with pytest.raises(AssertionError) as err:
            _ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
        assert 'Column name must be a string able to fit' in str(err.value)

        with pytest.raises(VerifyError) as err:
            _ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
                            coord_unit=2, coord_inc='1', time_ref_pos=1,
                            coord_ref_point='1', coord_ref_value='1')
        err_msgs = ['keyword arguments to Column were invalid',
                    'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI',
                    'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS']
        for msg in err_msgs:
            assert msg in str(err.value)

    def test_column_verify_start(self):
        """
        Regression test for https://github.com/astropy/astropy/pull/6359

        Test the validation of the column start position option (ASCII table
        only), corresponding to the ``TBCOL`` keyword.  Test whether the
        VerifyError message generated is the one with highest priority, i.e.
        the order of error messages to be displayed is maintained.
        """

        with pytest.raises(VerifyError) as err:
            _ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
        assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)

        with pytest.raises(VerifyError) as err:
            _ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
        assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
in str(err.value) with pytest.raises(VerifyError) as err: _ = fits.Column('a', format='I', start='-56', array=[1, 2, 3]) assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value) @pytest.mark.parametrize('keys', [{'TFORM': 'Z', 'TDISP': 'E'}, {'TFORM': '2', 'TDISP': '2E'}, {'TFORM': 3, 'TDISP': 6.3}, {'TFORM': float, 'TDISP': np.float64}, {'TFORM': '', 'TDISP': 'E.5'}]) def test_column_verify_formats(self, keys): """ Additional tests for verification of 'TFORM' and 'TDISP' keyword arguments used to initialize a Column. """ with pytest.raises(VerifyError) as err: _ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP']) for key in keys.keys(): assert key in str(err.value) assert str(keys[key]) in str(err.value) def test_regression_5383(): # Regression test for an undefined variable x = np.array([1, 2, 3]) col = fits.Column(name='a', array=x, format='E') hdu = fits.BinTableHDU.from_columns([col]) del hdu._header['TTYPE1'] hdu.columns[0].name = 'b' def test_table_to_hdu(): from astropy.table import Table table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]], names=['a', 'b', 'c'], dtype=['i', 'U1', 'f']) table['a'].unit = 'm/s' table['b'].unit = 'not-a-unit' table.meta['foo'] = 'bar' with pytest.warns(UnitsWarning, match="'not-a-unit' did not parse as" " fits unit") as w: hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1})) assert len(w) == 1 for name in 'abc': assert np.array_equal(table[name], hdu.data[name]) # Check that TUNITn cards appear in the correct order # (https://github.com/astropy/astropy/pull/5720) assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2') assert hdu.header['FOO'] == 'bar' assert hdu.header['TEST'] == 1 def test_regression_scalar_indexing(): # Indexing a FITS_rec with a tuple that returns a scalar record # should work x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]).view(fits.FITS_rec) x1a = x[1] # this should succeed. x1b = x[(1,)] # FITS_record does not define __eq__; so test elements. assert all(a == b for a, b in zip(x1a, x1b)) def test_new_column_attributes_preserved(tmpdir): # Regression test for https://github.com/astropy/astropy/issues/7145 # This makes sure that for now we don't clear away keywords that have # newly been recognized (in Astropy 3.0) as special column attributes but # instead just warn that we might do so in future. 
The new keywords are: # TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS col = [] col.append(fits.Column(name="TIME", format="1E", unit="s")) col.append(fits.Column(name="RAWX", format="1I", unit="pixel")) col.append(fits.Column(name="RAWY", format="1I")) cd = fits.ColDefs(col) hdr = fits.Header() # Keywords that will get ignored in favor of these in the data hdr['TUNIT1'] = 'pixel' hdr['TUNIT2'] = 'm' hdr['TUNIT3'] = 'm' # Keywords that were added in Astropy 3.0 that should eventually be # ignored and set on the data instead hdr['TCTYP2'] = 'RA---TAN' hdr['TCTYP3'] = 'ANGLE' hdr['TCRVL2'] = -999.0 hdr['TCRVL3'] = -999.0 hdr['TCRPX2'] = 1.0 hdr['TCRPX3'] = 1.0 hdr['TALEN2'] = 16384 hdr['TALEN3'] = 1024 hdr['TCUNI2'] = 'angstrom' hdr['TCUNI3'] = 'deg' # Other non-relevant keywords hdr['RA'] = 1.5 hdr['DEC'] = 3.0 with pytest.warns(AstropyDeprecationWarning) as warning_list: hdu = fits.BinTableHDU.from_columns(cd, hdr) assert str(warning_list[0].message).startswith( "The following keywords are now recognized as special") # First, check that special keywords such as TUNIT are ignored in the header # We may want to change that behavior in future, but this is the way it's # been for a while now. assert hdu.columns[0].unit == 's' assert hdu.columns[1].unit == 'pixel' assert hdu.columns[2].unit is None assert hdu.header['TUNIT1'] == 's' assert hdu.header['TUNIT2'] == 'pixel' assert 'TUNIT3' not in hdu.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu.columns[0].coord_type is None assert hdu.columns[1].coord_type is None assert hdu.columns[2].coord_type is None assert 'TCTYP1' not in hdu.header assert hdu.header['TCTYP2'] == 'RA---TAN' assert hdu.header['TCTYP3'] == 'ANGLE' # Make sure that other keywords are still there assert hdu.header['RA'] == 1.5 assert hdu.header['DEC'] == 3.0 # Now we can write this HDU to a file and re-load. 
Re-loading *should* # cause the special column attribtues to be picked up (it's just that when a # header is manually specified, these values are ignored) filename = tmpdir.join('test.fits').strpath hdu.writeto(filename) # Make sure we don't emit a warning in this case with warnings.catch_warnings(record=True) as warning_list: with fits.open(filename) as hdul: hdu2 = hdul[1] assert len(warning_list) == 0 # Check that column attributes are now correctly set assert hdu2.columns[0].unit == 's' assert hdu2.columns[1].unit == 'pixel' assert hdu2.columns[2].unit is None assert hdu2.header['TUNIT1'] == 's' assert hdu2.header['TUNIT2'] == 'pixel' assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu2.columns[0].coord_type is None assert hdu2.columns[1].coord_type == 'RA---TAN' assert hdu2.columns[2].coord_type == 'ANGLE' assert 'TCTYP1' not in hdu2.header assert hdu2.header['TCTYP2'] == 'RA---TAN' assert hdu2.header['TCTYP3'] == 'ANGLE' # Make sure that other keywords are still there assert hdu2.header['RA'] == 1.5 assert hdu2.header['DEC'] == 3.0 def test_empty_table(tmpdir): ofile = str(tmpdir.join('emptytable.fits')) hdu = fits.BinTableHDU(header=None, data=None, name='TEST') hdu.writeto(ofile) with fits.open(ofile) as hdul: assert hdul['TEST'].data.size == 0 ofile = str(tmpdir.join('emptytable.fits.gz')) hdu = fits.BinTableHDU(header=None, data=None, name='TEST') hdu.writeto(ofile, overwrite=True) with fits.open(ofile) as hdul: assert hdul['TEST'].data.size == 0 def test_a3dtable(tmpdir): testfile = str(tmpdir.join('test.fits')) hdu = fits.BinTableHDU.from_columns([ fits.Column(name='FOO', format='J', array=np.arange(10)) ]) hdu.header['XTENSION'] = 'A3DTABLE' hdu.writeto(testfile, output_verify='ignore') with fits.open(testfile) as hdul: assert hdul[1].header['XTENSION'] == 'A3DTABLE' with pytest.warns(AstropyUserWarning) as w: hdul.verify('fix') assert str(w[0].message) == 'Verification reported errors:' assert str(w[2].message).endswith( 'Converted the XTENSION keyword to BINTABLE.') assert hdul[1].header['XTENSION'] == 'BINTABLE' def test_invalid_file(tmp_path): hdu = fits.BinTableHDU() # little trick to write an invalid card ... hdu.header['FOO'] = None hdu.header.cards['FOO']._value = np.nan testfile = tmp_path / 'test.fits' hdu.writeto(testfile, output_verify='ignore') with fits.open(testfile) as hdul: assert hdul[1].data is not None def test_unit_parse_strict(tmp_path): path = tmp_path / 'invalid_unit.fits' # this is a unit parseable by the generic format but invalid for FITS invalid_unit = '1 / (MeV sr s)' unit = Unit(invalid_unit) t = Table({'a': [1, 2, 3]}) t.write(path) with fits.open(path, mode='update') as hdul: hdul[1].header['TUNIT1'] = invalid_unit # default is "warn" with pytest.warns(UnitsWarning): t = Table.read(path) assert isinstance(t['a'].unit, UnrecognizedUnit) t = Table.read(path, unit_parse_strict='silent') assert isinstance(t['a'].unit, UnrecognizedUnit) with pytest.raises(ValueError): Table.read(path, unit_parse_strict='raise') with pytest.warns(UnitsWarning): Table.read(path, unit_parse_strict='warn')
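# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above. It shows how the
# ``unit_parse_strict`` option of ``Table.read`` (exercised by the last test
# before this) handles a TUNITn value that is not a valid FITS unit. The file
# name 'bad_unit.fits' is a hypothetical placeholder.
from astropy.table import Table

t = Table.read('bad_unit.fits', unit_parse_strict='silent')  # keep an UnrecognizedUnit, no warning
t = Table.read('bad_unit.fits', unit_parse_strict='warn')    # the default: emit a UnitsWarning
# Table.read('bad_unit.fits', unit_parse_strict='raise')     # would raise ValueError instead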
dc6b17912ac92237fe1afa3c07d87ee250a74773d3edbb4fe57faf0af7083690
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest import numpy as np from astropy.io.fits.column import Column from astropy.io.fits.diff import (FITSDiff, HeaderDiff, ImageDataDiff, TableDataDiff, HDUDiff) from astropy.io.fits.hdu import HDUList, PrimaryHDU, ImageHDU from astropy.io.fits.hdu.base import NonstandardExtHDU from astropy.io.fits.hdu.table import BinTableHDU from astropy.io.fits.header import Header from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH from astropy.io import fits from . import FitsTestCase class DummyNonstandardExtHDU(NonstandardExtHDU): def __init__(self, data=None, *args, **kwargs): super().__init__(self, *args, **kwargs) self._buffer = np.asarray(data).tobytes() self._data_offset = 0 @property def size(self): return len(self._buffer) class TestDiff(FitsTestCase): def test_identical_headers(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() assert HeaderDiff(ha, hb).identical assert HeaderDiff(ha.tostring(), hb.tostring()).identical with pytest.raises(TypeError): HeaderDiff(1, 2) def test_slightly_different_headers(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 assert not HeaderDiff(ha, hb).identical def test_common_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 hb['D'] = (5, 'Comment') assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C'] def test_different_keyword_count(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() del hb['B'] diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_count == (3, 2) # But make sure the common keywords are at least correct assert diff.common_keywords == ['A', 'C'] def test_different_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 hb['D'] = (5, 'Comment') ha['E'] = (6, 'Comment') ha['F'] = (7, 'Comment') diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keywords == (['E', 'F'], ['D']) def test_different_keyword_values(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 4)]} def test_different_keyword_comments(self): ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')]) hb = ha.copy() hb.comments['C'] = 'comment 2' diff = HeaderDiff(ha, hb) assert not diff.identical assert (diff.diff_keyword_comments == {'C': [('comment 1', 'comment 2')]}) def test_different_keyword_values_with_duplicate(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() ha.append(('C', 4)) hb.append(('C', 5)) diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {'C': [None, (4, 5)]} def test_asymmetric_duplicate_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() ha.append(('A', 2, 'comment 1')) ha.append(('A', 3, 'comment 2')) hb.append(('B', 4, 'comment 3')) hb.append(('C', 5, 'comment 4')) diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {} assert (diff.diff_duplicate_keywords == {'A': (3, 1), 'B': (1, 2), 'C': (1, 2)}) report = diff.report() assert ("Inconsistent duplicates of keyword 'A' :\n" " Occurs 3 time(s) in a, 1 times in (b)") in report def test_floating_point_rtol(self): ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)]) hb = ha.copy() hb['B'] = 2.00002 hb['C'] = 3.000002 diff = HeaderDiff(ha, hb) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(2.00001, 2.00002)], 'C': 
[(3.000001, 3.000002)]}) diff = HeaderDiff(ha, hb, rtol=1e-6) assert not diff.identical assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]} diff = HeaderDiff(ha, hb, rtol=1e-5) assert diff.identical def test_floating_point_atol(self): ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)]) hb = ha.copy() hb['B'] = 1.00001 hb['C'] = 0.000001 diff = HeaderDiff(ha, hb, rtol=1e-6) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]}) diff = HeaderDiff(ha, hb, rtol=1e-5) assert not diff.identical assert (diff.diff_keyword_values == {'C': [(0.0, 0.000001)]}) diff = HeaderDiff(ha, hb, atol=1e-6) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)]}) diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)]}) diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5) assert diff.identical diff = HeaderDiff(ha, hb, atol=1.1e-5) assert diff.identical diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6) assert not diff.identical def test_ignore_blanks(self): with fits.conf.set_temp('strip_header_whitespace', False): ha = Header([('A', 1), ('B', 2), ('C', 'A ')]) hb = ha.copy() hb['C'] = 'A' assert ha['C'] != hb['C'] diff = HeaderDiff(ha, hb) # Trailing blanks are ignored by default assert diff.identical assert diff.diff_keyword_values == {} # Don't ignore blanks diff = HeaderDiff(ha, hb, ignore_blanks=False) assert not diff.identical assert diff.diff_keyword_values == {'C': [('A ', 'A')]} @pytest.mark.parametrize("differ", [HeaderDiff, HDUDiff, FITSDiff]) def test_ignore_blank_cards(self, differ): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152 Ignore blank cards. """ ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)]) hc = ha.copy() if differ is HeaderDiff: hc.append() hc.append() else: # Ensure blanks are not at the end as they are stripped by HDUs hc.add_blank(after=-2) hc.add_blank(after=-2) if differ in (HDUDiff, FITSDiff): # wrap it in a PrimaryHDU ha, hb, hc = (PrimaryHDU(np.arange(10), h) for h in (ha, hb, hc)) hc_header = hc.header if differ is FITSDiff: # wrap it in a HDUList ha, hb, hc = (HDUList([h]) for h in (ha, hb, hc)) hc_header = hc[0].header # We now have a header with interleaved blanks, and a header with end # blanks, both of which should ignore the blanks assert differ(ha, hb).identical assert differ(ha, hc).identical assert differ(hb, hc).identical assert not differ(ha, hb, ignore_blank_cards=False).identical assert not differ(ha, hc, ignore_blank_cards=False).identical # Both hb and hc have the same number of blank cards; since order is # currently ignored, these should still be identical even if blank # cards are not ignored assert differ(hb, hc, ignore_blank_cards=False).identical if differ is HeaderDiff: hc.append() else: # Ensure blanks are not at the end as they are stripped by HDUs hc_header.add_blank(after=-2) # But now there are different numbers of blanks, so they should not be # ignored: assert not differ(hb, hc, ignore_blank_cards=False).identical def test_ignore_hdus(self): a = np.arange(100).reshape(10, 10) b = a.copy() ha = Header([('A', 1), ('B', 2), ('C', 3)]) xa = np.array([(1.0, 1), (3.0, 4)], dtype=[('x', float), ('y', int)]) xb = np.array([(1.0, 2), (3.0, 5)], dtype=[('x', float), ('y', int)]) phdu = PrimaryHDU(header=ha) ihdua = ImageHDU(data=a, name='SCI') ihdub = ImageHDU(data=b, name='SCI') bhdu1 = BinTableHDU(data=xa, 
name='ASDF') bhdu2 = BinTableHDU(data=xb, name='ASDF') hdula = HDUList([phdu, ihdua, bhdu1]) hdulb = HDUList([phdu, ihdub, bhdu2]) # ASDF extension should be different diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdus[0][0] == 2 # ASDF extension should be ignored diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF']) assert diff.identical, diff.report() diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASD*']) assert diff.identical, diff.report() # SCI extension should be different hdulb['SCI'].data += 1 diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF']) assert not diff.identical # SCI and ASDF extensions should be ignored diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF']) assert diff.identical, diff.report() # All EXTVER of SCI should be ignored ihduc = ImageHDU(data=a, name='SCI', ver=2) hdulb.append(ihduc) diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF']) assert not any(diff.diff_hdus), diff.report() assert any(diff.diff_hdu_count), diff.report() def test_ignore_keyword_values(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['B'] = 4 hb['C'] = 5 diff = HeaderDiff(ha, hb, ignore_keywords=['*']) assert diff.identical diff = HeaderDiff(ha, hb, ignore_keywords=['B']) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 5)]} report = diff.report() assert 'Keyword B has different values' not in report assert 'Keyword C has different values' in report # Test case-insensitivity diff = HeaderDiff(ha, hb, ignore_keywords=['b']) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 5)]} def test_ignore_keyword_comments(self): ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')]) hb = ha.copy() hb.comments['B'] = 'D' hb.comments['C'] = 'E' diff = HeaderDiff(ha, hb, ignore_comments=['*']) assert diff.identical diff = HeaderDiff(ha, hb, ignore_comments=['B']) assert not diff.identical assert diff.diff_keyword_comments == {'C': [('C', 'E')]} report = diff.report() assert 'Keyword B has different comments' not in report assert 'Keyword C has different comments' in report # Test case-insensitivity diff = HeaderDiff(ha, hb, ignore_comments=['b']) assert not diff.identical assert diff.diff_keyword_comments == {'C': [('C', 'E')]} def test_trivial_identical_images(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100).reshape(10, 10) diff = ImageDataDiff(ia, ib) assert diff.identical assert diff.diff_total == 0 def test_identical_within_relative_tolerance(self): ia = np.ones((10, 10)) - 0.00001 ib = np.ones((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-4) assert diff.identical assert diff.diff_total == 0 def test_identical_within_absolute_tolerance(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-4) assert not diff.identical assert diff.diff_total == 100 diff = ImageDataDiff(ia, ib, atol=1.0e-4) assert diff.identical assert diff.diff_total == 0 def test_identical_within_rtol_and_atol(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5) assert diff.identical assert diff.diff_total == 0 def test_not_identical_within_rtol_and_atol(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6) assert not diff.identical assert diff.diff_total == 100 def test_identical_comp_image_hdus(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189 For this test we mostly 
just care that comparing to compressed images does not crash, and returns the correct results. Two compressed images will be considered identical if the decompressed data is the same. Obviously we test whether or not the same compression was used by looking for (or ignoring) header differences. """ data = np.arange(100.0).reshape(10, 10) hdu = fits.CompImageHDU(data=data) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdula, \ fits.open(self.temp('test.fits')) as hdulb: diff = FITSDiff(hdula, hdulb) assert diff.identical def test_different_dimensions(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100) - 1 # Although ib could be reshaped into the same dimensions, for now the # data is not compared anyways diff = ImageDataDiff(ia, ib) assert not diff.identical assert diff.diff_dimensions == ((10, 10), (100,)) assert diff.diff_total == 0 report = diff.report() assert 'Data dimensions differ' in report assert 'a: 10 x 10' in report assert 'b: 100' in report assert 'No further data comparison performed.' def test_different_pixels(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100).reshape(10, 10) ib[0, 0] = 10 ib[5, 5] = 20 diff = ImageDataDiff(ia, ib) assert not diff.identical assert diff.diff_dimensions == () assert diff.diff_total == 2 assert diff.diff_ratio == 0.02 assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))] def test_identical_tables(self): c1 = Column('A', format='L', array=[True, False]) c2 = Column('B', format='X', array=[[0], [1]]) c3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) c4 = Column('D', format='J', bscale=2.0, array=[0, 1]) c5 = Column('E', format='A3', array=['abc', 'def']) c6 = Column('F', format='E', unit='m', array=[0.0, 1.0]) c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0]) c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j]) c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j]) c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]]) columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10] ta = BinTableHDU.from_columns(columns) tb = BinTableHDU.from_columns([c.copy() for c in columns]) diff = TableDataDiff(ta.data, tb.data) assert diff.identical assert len(diff.common_columns) == 10 assert diff.common_column_names == set('abcdefghij') assert diff.diff_ratio == 0 assert diff.diff_total == 0 def test_diff_empty_tables(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178 Ensure that diffing tables containing empty data doesn't crash. 
""" c1 = Column('D', format='J') c2 = Column('E', format='J') thdu = BinTableHDU.from_columns([c1, c2], nrows=0) hdula = fits.HDUList([thdu]) hdulb = fits.HDUList([thdu]) diff = FITSDiff(hdula, hdulb) assert diff.identical def test_ignore_table_fields(self): c1 = Column('A', format='L', array=[True, False]) c2 = Column('B', format='X', array=[[0], [1]]) c3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) c4 = Column('B', format='X', array=[[1], [0]]) c5 = Column('C', format='4I', dim='(2, 2)', array=[[1, 2, 3, 4], [5, 6, 7, 8]]) ta = BinTableHDU.from_columns([c1, c2, c3]) tb = BinTableHDU.from_columns([c1, c4, c5]) diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C']) assert diff.identical # The only common column should be c1 assert len(diff.common_columns) == 1 assert diff.common_column_names == {'a'} assert diff.diff_ratio == 0 assert diff.diff_total == 0 def test_different_table_field_names(self): ca = Column('A', format='L', array=[True, False]) cb = Column('B', format='L', array=[True, False]) cc = Column('C', format='L', array=[True, False]) ta = BinTableHDU.from_columns([ca, cb]) tb = BinTableHDU.from_columns([ca, cc]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert len(diff.common_columns) == 1 assert diff.common_column_names == {'a'} assert diff.diff_column_names == (['B'], ['C']) assert diff.diff_ratio == 0 assert diff.diff_total == 0 report = diff.report() assert 'Extra column B of format L in a' in report assert 'Extra column C of format L in b' in report def test_different_table_field_counts(self): """ Test tables with some common columns, but different number of columns overall. """ ca = Column('A', format='L', array=[True, False]) cb = Column('B', format='L', array=[True, False]) cc = Column('C', format='L', array=[True, False]) ta = BinTableHDU.from_columns([cb]) tb = BinTableHDU.from_columns([ca, cb, cc]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert diff.diff_column_count == (1, 3) assert len(diff.common_columns) == 1 assert diff.common_column_names == {'b'} assert diff.diff_column_names == ([], ['A', 'C']) assert diff.diff_ratio == 0 assert diff.diff_total == 0 report = diff.report() assert ' Tables have different number of columns:' in report assert ' a: 1\n b: 3' in report def test_different_table_rows(self): """ Test tables that are otherwise identical but one has more rows than the other. """ ca1 = Column('A', format='L', array=[True, False]) cb1 = Column('B', format='L', array=[True, False]) ca2 = Column('A', format='L', array=[True, False, True]) cb2 = Column('B', format='L', array=[True, False, True]) ta = BinTableHDU.from_columns([ca1, cb1]) tb = BinTableHDU.from_columns([ca2, cb2]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert diff.diff_column_count == () assert len(diff.common_columns) == 2 assert diff.diff_rows == (2, 3) assert diff.diff_values == [] report = diff.report() assert 'Table rows differ' in report assert 'a: 2' in report assert 'b: 3' in report assert 'No further data comparison performed.' def test_different_table_data(self): """ Test diffing table data on columns of several different data formats and dimensions. 
""" ca1 = Column('A', format='L', array=[True, False]) ca2 = Column('B', format='X', array=[[0], [1]]) ca3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0]) ca5 = Column('E', format='A3', array=['abc', 'def']) ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0]) ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0]) ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j]) ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j]) ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]]) cb1 = Column('A', format='L', array=[False, False]) cb2 = Column('B', format='X', array=[[0], [0]]) cb3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [5, 6, 7, 8]]) cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0]) cb5 = Column('E', format='A3', array=['abc', 'ghi']) cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0]) cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0]) cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j]) cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j]) cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]]) ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7, ca8, ca9, ca10]) tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7, cb8, cb9, cb10]) diff = TableDataDiff(ta.data, tb.data, numdiffs=20) assert not diff.identical # The column definitions are the same, but not the column values assert diff.diff_columns == () assert diff.diff_values[0] == (('A', 0), (True, False)) assert diff.diff_values[1] == (('B', 1), ([1], [0])) assert diff.diff_values[2][0] == ('C', 1) assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all() assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all() assert diff.diff_values[3] == (('D', 0), (0, 2.0)) assert diff.diff_values[4] == (('E', 1), ('def', 'ghi')) assert diff.diff_values[5] == (('F', 0), (0.0, 1.0)) assert diff.diff_values[6] == (('F', 1), (1.0, 2.0)) assert diff.diff_values[7] == (('G', 0), (0.0, 2.0)) assert diff.diff_values[8] == (('G', 1), (1.0, 3.0)) assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j)) assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j)) assert diff.diff_values[11][0] == ('J', 0) assert (diff.diff_values[11][1][0] == [0, 1]).all() assert (diff.diff_values[11][1][1] == [1, 2]).all() assert diff.diff_values[12][0] == ('J', 1) assert (diff.diff_values[12][1][0] == [2, 3]).all() assert (diff.diff_values[12][1][1] == [3, 4]).all() assert diff.diff_total == 13 assert diff.diff_ratio == 0.65 report = diff.report() assert ('Column A data differs in row 0:\n' ' a> True\n' ' b> False') in report assert ('...and at 1 more indices.\n' ' Column D data differs in row 0:') in report assert ('13 different table data element(s) found (65.00% different)' in report) assert report.count('more indices') == 1 def test_identical_files_basic(self): """Test identicality of two simple, extensionless files.""" a = np.arange(100).reshape(10, 10) hdu = PrimaryHDU(data=a) hdu.writeto(self.temp('testa.fits')) hdu.writeto(self.temp('testb.fits')) diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits')) assert diff.identical report = diff.report() # Primary HDUs should contain no differences assert 'Primary HDU' not in report assert 'Extension HDU' not in report assert 'No differences found.' in report a = np.arange(10) ehdu = ImageHDU(data=a) diff = HDUDiff(ehdu, ehdu) assert diff.identical report = diff.report() assert 'No differences found.' 
in report def test_partially_identical_files1(self): """ Test files that have some identical HDUs but a different extension count. """ a = np.arange(100).reshape(10, 10) phdu = PrimaryHDU(data=a) ehdu = ImageHDU(data=a) hdula = HDUList([phdu, ehdu]) hdulb = HDUList([phdu, ehdu, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdu_count == (2, 3) # diff_hdus should be empty, since the third extension in hdulb # has nothing to compare against assert diff.diff_hdus == [] report = diff.report() assert 'Files contain different numbers of HDUs' in report assert 'a: 2\n b: 3' in report assert 'No differences found between common HDUs' in report def test_partially_identical_files2(self): """ Test files that have some identical HDUs but one different HDU. """ a = np.arange(100).reshape(10, 10) phdu = PrimaryHDU(data=a) ehdu = ImageHDU(data=a) ehdu2 = ImageHDU(data=(a + 1)) hdula = HDUList([phdu, ehdu, ehdu]) hdulb = HDUList([phdu, ehdu2, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdu_count == () assert len(diff.diff_hdus) == 1 assert diff.diff_hdus[0][0] == 1 hdudiff = diff.diff_hdus[0][1] assert not hdudiff.identical assert hdudiff.diff_extnames == () assert hdudiff.diff_extvers == () assert hdudiff.diff_extension_types == () assert hdudiff.diff_headers.identical assert hdudiff.diff_data is not None datadiff = hdudiff.diff_data assert isinstance(datadiff, ImageDataDiff) assert not datadiff.identical assert datadiff.diff_dimensions == () assert (datadiff.diff_pixels == [((0, y), (y, y + 1)) for y in range(10)]) assert datadiff.diff_ratio == 1.0 assert datadiff.diff_total == 100 report = diff.report() # Primary HDU and 2nd extension HDU should have no differences assert 'Primary HDU' not in report assert 'Extension HDU 2' not in report assert 'Extension HDU 1' in report assert 'Headers contain differences' not in report assert 'Data contains differences' in report for y in range(10): assert f'Data differs at [{y + 1}, 1]' in report assert '100 different pixels found (100.00% different).' in report def test_partially_identical_files3(self): """ Test files that have some identical HDUs but a different extension name. """ phdu = PrimaryHDU() ehdu = ImageHDU(name='FOO') hdula = HDUList([phdu, ehdu]) ehdu = BinTableHDU(name='BAR') ehdu.header['EXTVER'] = 2 ehdu.header['EXTLEVEL'] = 3 hdulb = HDUList([phdu, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdus[0][0] == 1 hdu_diff = diff.diff_hdus[0][1] assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE') assert hdu_diff.diff_extnames == ('FOO', 'BAR') assert hdu_diff.diff_extvers == (1, 2) assert hdu_diff.diff_extlevels == (1, 3) report = diff.report() assert 'Extension types differ' in report assert 'a: IMAGE\n b: BINTABLE' in report assert 'Extension names differ' in report assert 'a: FOO\n b: BAR' in report assert 'Extension versions differ' in report assert 'a: 1\n b: 2' in report assert 'Extension levels differ' in report assert 'a: 1\n b: 2' in report def test_diff_nans(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204 """ # First test some arrays that should be equivalent.... 
arr = np.empty((10, 10), dtype=np.float64) arr[:5] = 1.0 arr[5:] = np.nan arr2 = arr.copy() table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)], names=['cola', 'colb']).view(fits.FITS_rec) table2 = table.copy() assert ImageDataDiff(arr, arr2).identical assert TableDataDiff(table, table2).identical # Now let's introduce some differences, where there are nans and where # there are not nans arr2[0][0] = 2.0 arr2[5][0] = 2.0 table2[0][0] = 2.0 table2[1][1] = 2.0 diff = ImageDataDiff(arr, arr2) assert not diff.identical assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0)) assert diff.diff_pixels[1][0] == (5, 0) assert np.isnan(diff.diff_pixels[1][1][0]) assert diff.diff_pixels[1][1][1] == 2.0 diff = TableDataDiff(table, table2) assert not diff.identical assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0)) assert diff.diff_values[1][0] == ('colb', 1) assert np.isnan(diff.diff_values[1][1][0]) assert diff.diff_values[1][1][1] == 2.0 def test_file_output_from_path_string(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) report_as_string = diffobj.report() with open(outpath) as fout: assert fout.read() == report_as_string def test_file_output_overwrite_safety(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): diffobj.report(fileobj=outpath) def test_file_output_overwrite_success(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) report_as_string = diffobj.report() diffobj.report(fileobj=outpath, overwrite=True) with open(outpath) as fout: assert fout.read() == report_as_string, ( "overwritten output file is not identical to report string") def test_rawdatadiff_nodiff(self): a = np.arange(100, dtype='uint8').reshape(10, 10) b = a.copy() hdu_a = DummyNonstandardExtHDU(data=a) hdu_b = DummyNonstandardExtHDU(data=b) diff = HDUDiff(hdu_a, hdu_b) assert diff.identical report = diff.report() assert 'No differences found.' in report def test_rawdatadiff_dimsdiff(self): a = np.arange(100, dtype='uint8') + 10 b = a[:80].copy() hdu_a = DummyNonstandardExtHDU(data=a) hdu_b = DummyNonstandardExtHDU(data=b) diff = HDUDiff(hdu_a, hdu_b) assert not diff.identical report = diff.report() assert 'Data sizes differ:' in report assert 'a: 100 bytes' in report assert 'b: 80 bytes' in report assert 'No further data comparison performed.' in report def test_rawdatadiff_bytesdiff(self): a = np.arange(100, dtype='uint8') + 10 b = a.copy() changes = [(30, 200), (89, 170)] for i, v in changes: b[i] = v hdu_a = DummyNonstandardExtHDU(data=a) hdu_b = DummyNonstandardExtHDU(data=b) diff = HDUDiff(hdu_a, hdu_b) assert not diff.identical diff_bytes = diff.diff_data.diff_bytes assert len(changes) == len(diff_bytes) for j, (i, v) in enumerate(changes): assert diff_bytes[j] == (i, (i+10, v)) report = diff.report() assert 'Data contains differences:' in report for i, _ in changes: assert f'Data differs at byte {i}:' in report assert '2 different bytes found (2.00% different).' 
in report def test_fitsdiff_hdu_name(tmpdir): """Make sure diff report reports HDU name and ver if same in files""" path1 = str(tmpdir.join("test1.fits")) path2 = str(tmpdir.join("test2.fits")) hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI")]) hdulist.writeto(path1) hdulist[1].data[0] = 1 hdulist.writeto(path2) diff = FITSDiff(path1, path2) assert "Extension HDU 1 (SCI, 1):" in diff.report() def test_fitsdiff_no_hdu_name(tmpdir): """Make sure diff report doesn't report HDU name if not in files""" path1 = str(tmpdir.join("test1.fits")) path2 = str(tmpdir.join("test2.fits")) hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))]) hdulist.writeto(path1) hdulist[1].data[0] = 1 hdulist.writeto(path2) diff = FITSDiff(path1, path2) assert "Extension HDU 1:" in diff.report() def test_fitsdiff_with_names(tmpdir): """Make sure diff report doesn't report HDU name if not same in files""" path1 = str(tmpdir.join("test1.fits")) path2 = str(tmpdir.join("test2.fits")) hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI", ver=1)]) hdulist.writeto(path1) hdulist[1].name = "ERR" hdulist.writeto(path2) diff = FITSDiff(path1, path2) assert "Extension HDU 1:" in diff.report() def test_rawdatadiff_diff_with_rtol(tmpdir): """Regression test for https://github.com/astropy/astropy/issues/13330""" path1 = str(tmpdir.join("test1.fits")) path2 = str(tmpdir.join("test2.fits")) a = np.zeros((10, 2), dtype='float32') a[:, 0] = np.arange(10, dtype='float32') + 10 a[:, 1] = np.arange(10, dtype='float32') + 20 b = a.copy() changes = [(3, 13.1, 23.1), (8, 20.5, 30.5)] for i, v, w in changes: b[i, 0] = v b[i, 1] = w ca = Column('A', format='20E', array=[a]) cb = Column('A', format='20E', array=[b]) hdu_a = BinTableHDU.from_columns([ca]) hdu_a.writeto(path1, overwrite=True) hdu_b = BinTableHDU.from_columns([cb]) hdu_b.writeto(path2, overwrite=True) with fits.open(path1) as fits1: with fits.open(path2) as fits2: diff = FITSDiff(fits1, fits2, atol=0, rtol=0.001) str1 = diff.report(fileobj=None, indent=0) diff = FITSDiff(fits1, fits2, atol=0, rtol=0.01) str2 = diff.report(fileobj=None, indent=0) assert "...and at 1 more indices." in str1 assert "...and at 1 more indices." not in str2
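# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above. Typical non-test use
# of FITSDiff as exercised here: compare two files with a relative tolerance,
# ignore a volatile keyword, and print the textual report. The file names are
# hypothetical placeholders.
from astropy.io.fits.diff import FITSDiff

diff = FITSDiff('obs_a.fits', 'obs_b.fits', rtol=1e-5, ignore_keywords=['DATE'])
if not diff.identical:
    print(diff.report())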
e61423a539d109c6426d0869aca1910066ddc4a1e38e4efdf218ce39f2834e15
"""Testing :mod:`astropy.cosmology.units`.""" ############################################################################## # IMPORTS import pytest import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import Planck13, default_cosmology from astropy.tests.helper import assert_quantity_allclose from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY from astropy.utils.exceptions import AstropyDeprecationWarning ############################################################################## # TESTS ############################################################################## def test_has_expected_units(): """ Test that this module has the expected set of units. Some of the units are imported from :mod:`astropy.units`, or vice versa. Here we test presence, not usage. Units from :mod:`astropy.units` are tested in that module. Units defined in :mod:`astropy.cosmology` will be tested subsequently. """ with pytest.warns(AstropyDeprecationWarning, match="`littleh`"): assert u.astrophys.littleh is cu.littleh def test_has_expected_equivalencies(): """ Test that this module has the expected set of equivalencies. Many of the equivalencies are imported from :mod:`astropy.units`, so here we test presence, not usage. Equivalencies from :mod:`astropy.units` are tested in that module. Equivalencies defined in :mod:`astropy.cosmology` will be tested subsequently. """ with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"): assert u.equivalencies.with_H0 is cu.with_H0 def test_littleh(): """Test :func:`astropy.cosmology.units.with_H0`.""" H0_70 = 70 * u.km / u.s / u.Mpc h70dist = 70 * u.Mpc / cu.littleh assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc) # make sure using the default cosmology works cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc) # Now try a luminosity scaling h1lum = 0.49 * u.Lsun * cu.littleh ** -2 assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun) # And the trickiest one: magnitudes. Using H0=10 here for the round numbers H0_10 = 10 * u.km / u.s / u.Mpc # assume the "true" magnitude M = 12. 
# Then M - 5*log_10(h) = M + 5 = 17 withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2)) assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_dimensionless_redshift(): """Test :func:`astropy.cosmology.units.dimensionless_redshift`.""" z = 3 * cu.redshift val = 3 * u.one # show units not equal assert z.unit == cu.redshift assert z.unit != u.one assert u.get_physical_type(z) == "redshift" # test equivalency enabled by default assert z == val # also test that it works for powers assert (3 * cu.redshift ** 3) == val # and in composite units assert (3 * u.km / cu.redshift ** 3) == 3 * u.km # test it also works as an equivalency with u.set_enabled_equivalencies([]): # turn off default equivalencies assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val with pytest.raises(ValueError): z.to(u.one) # if this fails, something is really wrong with u.add_enabled_equivalencies(cu.dimensionless_redshift()): assert z == val @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_redshift_temperature(): """Test :func:`astropy.cosmology.units.redshift_temperature`.""" cosmo = Planck13.clone(Tcmb0=3 * u.K) default_cosmo = default_cosmology.get() z = 15 * cu.redshift Tcmb = cosmo.Tcmb(z) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.redshift_temperature() assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.redshift_temperature() assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z)) assert default_cosmo.Tcmb(z) != Tcmb # 2) Specifying the cosmology equivalency = cu.redshift_temperature(cosmo) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # Test `atzkw` equivalency = cu.redshift_temperature(cosmo, ztol=1e-10) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") def test_redshift_hubble(): """Test :func:`astropy.cosmology.units.redshift_hubble`.""" unit = u.km / u.s / u.Mpc cosmo = Planck13.clone(H0=100 * unit) default_cosmo = default_cosmology.get() z = 15 * cu.redshift H = cosmo.H(z) h = H.to_value(u.km/u.s/u.Mpc) / 100 * cu.littleh # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.redshift_hubble() # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.redshift_hubble() assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z)) assert default_cosmo.H(z) != H # 2) Specifying the cosmology equivalency = cu.redshift_hubble(cosmo) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # Test `atzkw` equivalency = cu.redshift_hubble(cosmo, ztol=1e-10) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H 
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # little-h @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") @pytest.mark.parametrize( "kind", [cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"] ) def test_redshift_distance(kind): """Test :func:`astropy.cosmology.units.redshift_distance`.""" z = 15 * cu.redshift d = getattr(Planck13, kind + "_distance")(z) equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind) # properties of Equivalency assert equivalency.name[0] == "redshift_distance" assert equivalency.kwargs[0]["cosmology"] == Planck13 assert equivalency.kwargs[0]["distance"] == kind # roundtrip assert_quantity_allclose(z.to(u.Mpc, equivalency), d) assert_quantity_allclose(d.to(cu.redshift, equivalency), z) def test_redshift_distance_wrong_kind(): """Test :func:`astropy.cosmology.units.redshift_distance` wrong kind.""" with pytest.raises(ValueError, match="`kind`"): cu.redshift_distance(kind=None) @pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy") class Test_with_redshift: """Test `astropy.cosmology.units.with_redshift`.""" @pytest.fixture(scope="class") def cosmo(self): """Test cosmology.""" return Planck13.clone(Tcmb0=3 * u.K) # =========================================== def test_cosmo_different(self, cosmo): """The default is different than the test cosmology.""" default_cosmo = default_cosmology.get() assert default_cosmo != cosmo # shows changing default def test_no_equivalency(self, cosmo): """Test the equivalency ``with_redshift`` without any enabled.""" equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False) assert len(equivalency) == 0 # ------------------------------------------- def test_temperature_off(self, cosmo): """Test ``with_redshift`` with the temperature off.""" z = 15 * cu.redshift err_msg = ( r"^'redshift' \(redshift\) and 'K' \(temperature\) are not convertible$" ) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(Tcmb=False) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(u.K, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, Tcmb=False) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(u.K, equivalency) def test_temperature(self, cosmo): """Test temperature equivalency component.""" default_cosmo = default_cosmology.get() z = 15 * cu.redshift Tcmb = cosmo.Tcmb(z) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.with_redshift(Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z)) assert default_cosmo.Tcmb(z) != Tcmb # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, Tcmb=True) assert_quantity_allclose(z.to(u.K, equivalency), Tcmb) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # Test `atzkw` # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10}) assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) # ------------------------------------------- def test_hubble_off(self, cosmo): """Test ``with_redshift`` with Hubble off.""" unit = u.km / u.s / u.Mpc z = 15 * cu.redshift err_msg = ( r"^'redshift' \(redshift\) 
and 'km / \(Mpc s\)' \(frequency\) are not " "convertible$" ) # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(hubble=False) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(unit, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, hubble=False) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(unit, equivalency) def test_hubble(self, cosmo): """Test Hubble equivalency component.""" unit = u.km/u.s/u.Mpc default_cosmo = default_cosmology.get() z = 15 * cu.redshift H = cosmo.H(z) h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(hubble=True) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = cu.with_redshift(hubble=True) assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z)) assert default_cosmo.H(z) != H # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, hubble=True) # H assert_quantity_allclose(z.to(unit, equivalency), H) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # little-h assert_quantity_allclose(z.to(cu.littleh, equivalency), h) assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # Test `atzkw` # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10}) assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h # ------------------------------------------- def test_distance_off(self, cosmo): """Test ``with_redshift`` with the distance off.""" z = 15 * cu.redshift err_msg = r"^'redshift' \(redshift\) and 'Mpc' \(length\) are not convertible$" # 1) Default (without specifying the cosmology) with default_cosmology.set(cosmo): equivalency = cu.with_redshift(distance=None) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(u.Mpc, equivalency) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, distance=None) with pytest.raises(u.UnitConversionError, match=err_msg): z.to(u.Mpc, equivalency) def test_distance_default(self): """Test distance equivalency default.""" z = 15 * cu.redshift d = default_cosmology.get().comoving_distance(z) equivalency = cu.with_redshift() assert_quantity_allclose(z.to(u.Mpc, equivalency), d) assert_quantity_allclose(d.to(cu.redshift, equivalency), z) def test_distance_wrong_kind(self): """Test distance equivalency, but the wrong kind.""" with pytest.raises(ValueError, match="`kind`"): cu.with_redshift(distance=ValueError) @pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"]) def test_distance(self, kind): """Test distance equivalency.""" cosmo = Planck13 z = 15 * cu.redshift dist = getattr(cosmo, kind + "_distance")(z) default_cosmo = default_cosmology.get() assert default_cosmo != cosmo # shows changing default # 1) without specifying the cosmology with default_cosmology.set(cosmo): equivalency = cu.with_redshift(distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), dist) # showing the answer changes if the cosmology changes # this test uses the default cosmology equivalency = 
cu.with_redshift(distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), getattr(default_cosmo, kind + "_distance")(z)) assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist) # 2) Specifying the cosmology equivalency = cu.with_redshift(cosmo, distance=kind) assert_quantity_allclose(z.to(u.Mpc, equivalency), dist) assert_quantity_allclose(dist.to(cu.redshift, equivalency), z) # Test atzkw # this is really just a test that 'atzkw' doesn't fail equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10}) assert_quantity_allclose(dist.to(cu.redshift, equivalency), z) # FIXME! get "dimensionless_redshift", "with_redshift" to work in this # they are not in ``astropy.units.equivalencies``, so the following fails @pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF") @pytest.mark.parametrize("equiv", [cu.with_H0]) def test_equivalencies_asdf(tmpdir, equiv, recwarn): from asdf.tests import helpers tree = {"equiv": equiv()} helpers.assert_roundtrip_tree(tree, tmpdir) def test_equivalency_context_manager(): base_registry = u.get_current_unit_registry() # check starting with only the dimensionless_redshift equivalency. assert len(base_registry.equivalencies) == 1 assert str(base_registry.equivalencies[0][0]) == "redshift"
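# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above (and, like these
# tests, it needs scipy). It uses the ``with_redshift`` equivalency exercised
# here to turn a redshift into a luminosity distance and a CMB temperature
# for the Planck13 cosmology.
import astropy.units as u
import astropy.cosmology.units as cu
from astropy.cosmology import Planck13

z = 2.0 * cu.redshift
eq = cu.with_redshift(Planck13, distance="luminosity", Tcmb=True)
d_L = z.to(u.Mpc, eq)   # luminosity distance at z = 2
T_cmb = z.to(u.K, eq)   # CMB temperature at z = 2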
d0b19936fc07cc613e040958520c284418c4f657515d89db3d6b47c9885c0a29
#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst # NOTE: The configuration for the package, including the name, version, and # other information are set in the setup.cfg file. import sys # First provide helpful messages if contributors try and run legacy commands # for tests or docs. TEST_HELP = """ Note: running tests is no longer done using 'python setup.py test'. Instead you will need to run: tox -e test If you don't already have tox installed, you can install it with: pip install tox If you only want to run part of the test suite, you can also use pytest directly with:: pip install -e .[test] pytest For more information, see: https://docs.astropy.org/en/latest/development/testguide.html#running-tests """ if "test" in sys.argv: print(TEST_HELP) sys.exit(1) DOCS_HELP = """ Note: building the documentation is no longer done using 'python setup.py build_docs'. Instead you will need to run: tox -e build_docs If you don't already have tox installed, you can install it with: pip install tox You can also build the documentation with Sphinx directly using:: pip install -e .[docs] cd docs make html For more information, see: https://docs.astropy.org/en/latest/install.html#builddocs """ if "build_docs" in sys.argv or "build_sphinx" in sys.argv: print(DOCS_HELP) sys.exit(1) # Only import these if the above checks are okay # to avoid masking the real problem with import error. from setuptools import setup # noqa: E402 from extension_helpers import get_extensions # noqa: E402 setup(ext_modules=get_extensions())
5a78048f138b91ad2fd31b70e2134422889f20c5a65918b898c023a452f4e018
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file is the main file used when running tests with pytest directly, # in particular if running e.g. ``pytest docs/``. import os import tempfile import hypothesis from astropy import __version__ try: from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS except ImportError: PYTEST_HEADER_MODULES = {} TESTED_VERSIONS = {} # This has to be in the root dir or it will not display in CI. def pytest_configure(config): PYTEST_HEADER_MODULES["PyERFA"] = "erfa" PYTEST_HEADER_MODULES["Cython"] = "cython" PYTEST_HEADER_MODULES["Scikit-image"] = "skimage" PYTEST_HEADER_MODULES["asdf"] = "asdf" PYTEST_HEADER_MODULES["pyarrow"] = "pyarrow" TESTED_VERSIONS["Astropy"] = __version__ # This has to be in the root dir or it will not display in CI. def pytest_report_header(config): # This gets added after the pytest-astropy-header output. return ( f'CI: {os.environ.get("CI", "undefined")}\n' f'ARCH_ON_CI: {os.environ.get("ARCH_ON_CI", "undefined")}\n' f'IS_CRON: {os.environ.get("IS_CRON", "undefined")}\n' ) # Tell Hypothesis that we might be running slow tests, to print the seed blob # so we can easily reproduce failures from CI, and derive a fuzzing profile # to try many more inputs when we detect a scheduled build or when specifically # requested using the HYPOTHESIS_PROFILE=fuzz environment variable or # `pytest --hypothesis-profile=fuzz ...` argument. hypothesis.settings.register_profile( "ci", deadline=None, print_blob=True, derandomize=True ) hypothesis.settings.register_profile( "fuzzing", deadline=None, print_blob=True, max_examples=1000 ) default = ( "fuzzing" if ( os.environ.get("IS_CRON") == "true" and os.environ.get("ARCH_ON_CI") not in ("aarch64", "ppc64le") ) else "ci" ) hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", default)) # Make sure we use temporary directories for the config and cache # so that the tests are insensitive to local configuration. os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config") os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache") os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy")) os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy")) # Note that we don't need to change the environment variables back or remove # them after testing, because they are only changed for the duration of the # Python process, and this configuration only matters if running pytest # directly, not from e.g. an IPython session.
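# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the conftest above. The two Hypothesis
# profiles that the conftest registers can also be selected explicitly, for
# example from an interactive session, instead of through the
# HYPOTHESIS_PROFILE environment variable.
import hypothesis

hypothesis.settings.load_profile("fuzzing")  # registered above: no deadline, 1000 examples
hypothesis.settings.load_profile("ci")       # registered above: derandomized, prints seed blobs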
06178ec181388c9f14a79e030cc36d36c5954210c077a2c462fa32cbebd4d684
# NOTE: First try _dev.scm_version if it exists and setuptools_scm is installed # This file is not included in astropy wheels/tarballs, so otherwise it will # fall back on the generated _version module. try: try: from ._dev.scm_version import version except ImportError: from ._version import version except Exception: import warnings warnings.warn( f'could not determine {__name__.split(".")[0]} package version; ' "this indicates a broken installation" ) del warnings version = "0.0.0" # We use Version to define major, minor, micro, but ignore any suffixes. def split_version(version): pieces = [0, 0, 0] try: from packaging.version import Version v = Version(version) pieces = [v.major, v.minor, v.micro] except Exception: pass return pieces major, minor, bugfix = split_version(version) del split_version # clean up namespace. release = "dev" not in version
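# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above. What the helpers above
# compute for a hypothetical development version string, using the same
# packaging.version.Version machinery as split_version().
from packaging.version import Version

v = Version("5.3.dev130")
major, minor, bugfix = v.major, v.minor, v.micro  # -> 5, 3, 0
release = "dev" not in "5.3.dev130"               # -> False for a dev build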
fae2e15ae57c8148f284d5da889fe5715ad3009fe8167fd52de86a0dd5a3298f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astropy is a package intended to contain core functionality and some common tools needed for performing astronomy and astrophysics research with Python. It also provides an index for other astronomy packages and tools for managing them. """ import os import sys from .version import version as __version__ def _is_astropy_source(path=None): """ Returns whether the source for this module is directly in an astropy source distribution or checkout. """ # If this __init__.py file is in ./astropy/ then import is within a source # dir .astropy-root is a file distributed with the source, but that should # not installed if path is None: path = os.path.join(os.path.dirname(__file__), os.pardir) elif os.path.isfile(path): path = os.path.dirname(path) source_dir = os.path.abspath(path) return os.path.exists(os.path.join(source_dir, ".astropy-root")) # The location of the online documentation for astropy # This location will normally point to the current released version of astropy if "dev" in __version__: online_docs_root = "https://docs.astropy.org/en/latest/" else: online_docs_root = f"https://docs.astropy.org/en/{__version__}/" from . import config as _config class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy`. """ unicode_output = _config.ConfigItem( False, "When True, use Unicode characters when outputting values, and " "displaying widgets at the console.", ) use_color = _config.ConfigItem( sys.platform != "win32", "When True, use ANSI color escape sequences when writing to the console.", aliases=["astropy.utils.console.USE_COLOR", "astropy.logger.USE_COLOR"], ) max_lines = _config.ConfigItem( None, description=( "Maximum number of lines in the display of pretty-printed " "objects. If not provided, try to determine automatically from the " "terminal size. Negative numbers mean no limit." ), cfgtype="integer(default=None)", aliases=["astropy.table.pprint.max_lines"], ) max_width = _config.ConfigItem( None, description=( "Maximum number of characters per line in the display of " "pretty-printed objects. If not provided, try to determine " "automatically from the terminal size. Negative numbers mean no " "limit." ), cfgtype="integer(default=None)", aliases=["astropy.table.pprint.max_width"], ) conf = Conf() # Define a base ScienceState for configuring constants and units from .utils.state import ScienceState class base_constants_version(ScienceState): """ Base class for the real version-setters below """ _value = "test" _versions = dict(test="test") @classmethod def validate(cls, value): if value not in cls._versions: raise ValueError(f"Must be one of {list(cls._versions.keys())}") return cls._versions[value] @classmethod def set(cls, value): """ Set the current constants value. 
""" import sys if "astropy.units" in sys.modules: raise RuntimeError("astropy.units is already imported") if "astropy.constants" in sys.modules: raise RuntimeError("astropy.constants is already imported") return super().set(value) class physical_constants(base_constants_version): """ The version of physical constants to use """ # Maintainers: update when new constants are added _value = "codata2018" _versions = dict( codata2018="codata2018", codata2014="codata2014", codata2010="codata2010", astropyconst40="codata2018", astropyconst20="codata2014", astropyconst13="codata2010", ) class astronomical_constants(base_constants_version): """ The version of astronomical constants to use """ # Maintainers: update when new constants are added _value = "iau2015" _versions = dict( iau2015="iau2015", iau2012="iau2012", astropyconst40="iau2015", astropyconst20="iau2015", astropyconst13="iau2012", ) # Create the test() function from .tests.runner import TestRunner test = TestRunner.make_test_runner_in(__path__[0]) # if we are *not* in setup mode, import the logger and possibly populate the # configuration file with the defaults def _initialize_astropy(): try: from .utils import _compiler except ImportError: if _is_astropy_source(): raise ImportError( "You appear to be trying to import astropy from " "within a source checkout or from an editable " "installation without building the extension " "modules first. Either run:\n\n" " pip install -e .\n\nor\n\n" " python setup.py build_ext --inplace\n\n" "to make sure the extension modules are built " ) else: # Outright broken installation, just raise standard error raise # Set the bibtex entry to the article referenced in CITATION. def _get_bibtex(): citation_file = os.path.join(os.path.dirname(__file__), "CITATION") with open(citation_file) as citation: refs = citation.read().split("@ARTICLE")[1:] if len(refs) == 0: return "" bibtexreference = f"@ARTICLE{refs[0]}" return bibtexreference __citation__ = __bibtex__ = _get_bibtex() from .logger import _init_log, _teardown_log log = _init_log() _initialize_astropy() from .utils.misc import find_api_page def online_help(query): """ Search the online Astropy documentation for the given query. Opens the results in the default web browser. Requires an active Internet connection. Parameters ---------- query : str The search query. """ import webbrowser from urllib.parse import urlencode version = __version__ if "dev" in version: version = "latest" else: version = "v" + version url = f"https://docs.astropy.org/en/{version}/search.html?{urlencode({'q': query})}" webbrowser.open(url) __dir_inc__ = [ "__version__", "__githash__", "__bibtex__", "test", "log", "find_api_page", "online_help", "online_docs_root", "conf", "physical_constants", "astronomical_constants", ] from types import ModuleType as __module_type__ # Clean up top-level namespace--delete everything that isn't in __dir_inc__ # or is a magic attribute, and that isn't a submodule of this package for varname in dir(): if not ( (varname.startswith("__") and varname.endswith("__")) or varname in __dir_inc__ or ( varname[0] != "_" and isinstance(locals()[varname], __module_type__) and locals()[varname].__name__.startswith(__name__ + ".") ) ): # The last clause in the the above disjunction deserves explanation: # When using relative imports like ``from .. import config``, the # ``config`` variable is automatically created in the namespace of # whatever module ``..`` resolves to (in this case astropy). This # happens a few times just in the module setup above. 
        # This allows the cleanup to keep any public submodules of the
        # astropy package.
        del locals()[varname]

del varname, __module_type__
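# --- Usage sketch (not part of the original module) ------------------------
# A minimal example of the ScienceState classes defined above. As the
# overridden ``set`` enforces, the constants version must be chosen before
# ``astropy.units`` or ``astropy.constants`` is imported; this assumes a
# fresh Python session where neither has been imported yet.
import astropy

astropy.physical_constants.set("codata2014")
astropy.astronomical_constants.set("iau2012")

import astropy.constants as const   # only safe to import after the calls above
print(const.G)                      # reflects the selected constants version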
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This file contains pytest configuration settings that are astropy-specific (i.e. those that would not necessarily be shared by affiliated packages making use of astropy's test runner). """ import builtins import os import sys import tempfile import warnings try: from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS except ImportError: PYTEST_HEADER_MODULES = {} TESTED_VERSIONS = {} import pytest from astropy import __version__ # This is needed to silence a warning from matplotlib caused by # PyInstaller's matplotlib runtime hook. This can be removed once the # issue is fixed upstream in PyInstaller, and only impacts us when running # the tests from a PyInstaller bundle. # See https://github.com/astropy/astropy/issues/10785 if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"): # The above checks whether we are running in a PyInstaller bundle. warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning) # Note: while the filterwarnings is required, this import has to come after the # filterwarnings above, because this attempts to import matplotlib: from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB if HAS_MATPLOTLIB: import matplotlib matplotlibrc_cache = {} @pytest.fixture def ignore_matplotlibrc(): # This is a fixture for tests that use matplotlib but not pytest-mpl # (which already handles rcParams) from matplotlib import pyplot as plt with plt.style.context({}, after_reset=True): yield @pytest.fixture def fast_thread_switching(): """Fixture that reduces thread switching interval. This makes it easier to provoke race conditions. """ old = sys.getswitchinterval() sys.setswitchinterval(1e-6) yield sys.setswitchinterval(old) def pytest_configure(config): from astropy.utils.iers import conf as iers_conf # Disable IERS auto download for testing iers_conf.auto_download = False builtins._pytest_running = True # do not assign to matplotlibrc_cache in function scope if HAS_MATPLOTLIB: with warnings.catch_warnings(): warnings.simplefilter("ignore") matplotlibrc_cache.update(matplotlib.rcParams) matplotlib.rcdefaults() matplotlib.use("Agg") # Make sure we use temporary directories for the config and cache # so that the tests are insensitive to local configuration. 
Note that this # is also set in the test runner, but we need to also set it here for # things to work properly in parallel mode builtins._xdg_config_home_orig = os.environ.get("XDG_CONFIG_HOME") builtins._xdg_cache_home_orig = os.environ.get("XDG_CACHE_HOME") os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config") os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache") os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy")) os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy")) config.option.astropy_header = True PYTEST_HEADER_MODULES["PyERFA"] = "erfa" PYTEST_HEADER_MODULES["Cython"] = "cython" PYTEST_HEADER_MODULES["Scikit-image"] = "skimage" PYTEST_HEADER_MODULES["asdf"] = "asdf" TESTED_VERSIONS["Astropy"] = __version__ def pytest_unconfigure(config): from astropy.utils.iers import conf as iers_conf # Undo IERS auto download setting for testing iers_conf.reset("auto_download") builtins._pytest_running = False # do not assign to matplotlibrc_cache in function scope if HAS_MATPLOTLIB: with warnings.catch_warnings(): warnings.simplefilter("ignore") matplotlib.rcParams.update(matplotlibrc_cache) matplotlibrc_cache.clear() if builtins._xdg_config_home_orig is None: os.environ.pop("XDG_CONFIG_HOME") else: os.environ["XDG_CONFIG_HOME"] = builtins._xdg_config_home_orig if builtins._xdg_cache_home_orig is None: os.environ.pop("XDG_CACHE_HOME") else: os.environ["XDG_CACHE_HOME"] = builtins._xdg_cache_home_orig def pytest_terminal_summary(terminalreporter): """Output a warning to IPython users in case any tests failed.""" try: get_ipython() except NameError: return if not terminalreporter.stats.get("failed"): # Only issue the warning when there are actually failures return terminalreporter.ensure_newline() terminalreporter.write_line( "Some tests may fail when run from the IPython prompt; " "especially, but not limited to tests involving logging and warning " "handling. Unless you are certain as to the cause of the failure, " "please check that the failure occurs outside IPython as well. See " "https://docs.astropy.org/en/stable/known_issues.html#failing-logging-" "tests-when-running-the-tests-in-ipython for more information.", yellow=True, bold=True, )
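# --- Usage sketch (not part of the original conftest) -----------------------
# A hypothetical test showing how the ``fast_thread_switching`` fixture above
# can be requested to make thread-switching races easier to provoke.
import threading


def test_counter_with_fast_switching(fast_thread_switching):
    counter = {"n": 0}
    lock = threading.Lock()

    def work():
        for _ in range(1000):
            with lock:
                counter["n"] += 1

    threads = [threading.Thread(target=work) for _ in range(4)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    assert counter["n"] == 4000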
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module defines a logging class based on the built-in logging module. .. note:: This module is meant for internal ``astropy`` usage. For use in other packages, we recommend implementing your own logger instead. """ import inspect import logging import os import sys import warnings from contextlib import contextmanager from . import conf as _conf from . import config as _config from .utils import find_current_module from .utils.exceptions import AstropyUserWarning, AstropyWarning __all__ = ["Conf", "conf", "log", "AstropyLogger", "LoggingError"] # import the logging levels from logging so that one can do: # log.setLevel(log.DEBUG), for example logging_levels = [ "NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "FATAL", ] for level in logging_levels: globals()[level] = getattr(logging, level) __all__ += logging_levels # Initialize by calling _init_log() log = None class LoggingError(Exception): """ This exception is for various errors that occur in the astropy logger, typically when activating or deactivating logger-related features. """ class _AstLogIPYExc(Exception): """ An exception that is used only as a placeholder to indicate to the IPython exception-catching mechanism that the astropy exception-capturing is activated. It should not actually be used as an exception anywhere. """ class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.logger`. """ log_level = _config.ConfigItem( "INFO", "Threshold for the logging messages. Logging " "messages that are less severe than this level " "will be ignored. The levels are ``'DEBUG'``, " "``'INFO'``, ``'WARNING'``, ``'ERROR'``.", ) log_warnings = _config.ConfigItem(True, "Whether to log `warnings.warn` calls.") log_exceptions = _config.ConfigItem( False, "Whether to log exceptions before raising them." ) log_to_file = _config.ConfigItem( False, "Whether to always log messages to a log file." ) log_file_path = _config.ConfigItem( "", "The file to log messages to. If empty string is given, " "it defaults to a file ``'astropy.log'`` in " "the astropy config directory.", ) log_file_level = _config.ConfigItem( "INFO", "Threshold for logging messages to `log_file_path`." ) log_file_format = _config.ConfigItem( "%(asctime)r, %(origin)r, %(levelname)r, %(message)r", "Format for log file entries.", ) log_file_encoding = _config.ConfigItem( "", "The encoding (e.g., UTF-8) to use for the log file. If empty string " "is given, it defaults to the platform-preferred encoding.", ) conf = Conf() def _init_log(): """Initializes the Astropy log--in most circumstances this is called automatically when importing astropy. """ global log orig_logger_cls = logging.getLoggerClass() logging.setLoggerClass(AstropyLogger) try: log = logging.getLogger("astropy") log._set_defaults() finally: logging.setLoggerClass(orig_logger_cls) return log def _teardown_log(): """Shut down exception and warning logging (if enabled) and clear all Astropy loggers from the logging module's cache. This involves poking some logging module internals, so much if it is 'at your own risk' and is allowed to pass silently if any exceptions occur. """ global log if log.exception_logging_enabled(): log.disable_exception_logging() if log.warnings_logging_enabled(): log.disable_warnings_logging() del log # Now for the fun stuff... 
try: logging._acquireLock() try: loggerDict = logging.Logger.manager.loggerDict for key in loggerDict.keys(): if key == "astropy" or key.startswith("astropy."): del loggerDict[key] finally: logging._releaseLock() except Exception: pass Logger = logging.getLoggerClass() class AstropyLogger(Logger): """ This class is used to set up the Astropy logging. The main functionality added by this class over the built-in logging.Logger class is the ability to keep track of the origin of the messages, the ability to enable logging of warnings.warn calls and exceptions, and the addition of colorized output and context managers to easily capture messages to a file or list. """ def makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=None, extra=None, sinfo=None, ): if extra is None: extra = {} if "origin" not in extra: current_module = find_current_module(1, finddiff=[True, "logging"]) if current_module is not None: extra["origin"] = current_module.__name__ else: extra["origin"] = "unknown" return Logger.makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo, ) _showwarning_orig = None def _showwarning(self, *args, **kwargs): # Bail out if we are not catching a warning from Astropy if not isinstance(args[0], AstropyWarning): return self._showwarning_orig(*args, **kwargs) warning = args[0] # Deliberately not using isinstance here: We want to display # the class name only when it's not the default class, # AstropyWarning. The name of subclasses of AstropyWarning should # be displayed. if type(warning) not in (AstropyWarning, AstropyUserWarning): message = f"{warning.__class__.__name__}: {args[0]}" else: message = str(args[0]) mod_path = args[2] # Now that we have the module's path, we look through sys.modules to # find the module object and thus the fully-package-specified module # name. The module.__file__ is the original source file name. mod_name = None mod_path, ext = os.path.splitext(mod_path) for name, mod in list(sys.modules.items()): try: # Believe it or not this can fail in some cases: # https://github.com/astropy/astropy/issues/2671 path = os.path.splitext(getattr(mod, "__file__", ""))[0] except Exception: continue if path == mod_path: mod_name = mod.__name__ break if mod_name is not None: self.warning(message, extra={"origin": mod_name}) else: self.warning(message) def warnings_logging_enabled(self): return self._showwarning_orig is not None def enable_warnings_logging(self): """ Enable logging of warnings.warn() calls Once called, any subsequent calls to ``warnings.warn()`` are redirected to this logger and emitted with level ``WARN``. Note that this replaces the output from ``warnings.warn``. This can be disabled with ``disable_warnings_logging``. """ if self.warnings_logging_enabled(): raise LoggingError("Warnings logging has already been enabled") self._showwarning_orig = warnings.showwarning warnings.showwarning = self._showwarning def disable_warnings_logging(self): """ Disable logging of warnings.warn() calls Once called, any subsequent calls to ``warnings.warn()`` are no longer redirected to this logger. This can be re-enabled with ``enable_warnings_logging``. 
""" if not self.warnings_logging_enabled(): raise LoggingError("Warnings logging has not been enabled") if warnings.showwarning != self._showwarning: raise LoggingError( "Cannot disable warnings logging: " "warnings.showwarning was not set by this " "logger, or has been overridden" ) warnings.showwarning = self._showwarning_orig self._showwarning_orig = None _excepthook_orig = None def _excepthook(self, etype, value, traceback): if traceback is None: mod = None else: tb = traceback while tb.tb_next is not None: tb = tb.tb_next mod = inspect.getmodule(tb) # include the the error type in the message. if len(value.args) > 0: message = f"{etype.__name__}: {str(value)}" else: message = str(etype.__name__) if mod is not None: self.error(message, extra={"origin": mod.__name__}) else: self.error(message) self._excepthook_orig(etype, value, traceback) def exception_logging_enabled(self): """ Determine if the exception-logging mechanism is enabled. Returns ------- exclog : bool True if exception logging is on, False if not. """ try: ip = get_ipython() except NameError: ip = None if ip is None: return self._excepthook_orig is not None else: return _AstLogIPYExc in ip.custom_exceptions def enable_exception_logging(self): """ Enable logging of exceptions Once called, any uncaught exceptions will be emitted with level ``ERROR`` by this logger, before being raised. This can be disabled with ``disable_exception_logging``. """ try: ip = get_ipython() except NameError: ip = None if self.exception_logging_enabled(): raise LoggingError("Exception logging has already been enabled") if ip is None: # standard python interpreter self._excepthook_orig = sys.excepthook sys.excepthook = self._excepthook else: # IPython has its own way of dealing with excepthook # We need to locally define the function here, because IPython # actually makes this a member function of their own class def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None): # First use our excepthook self._excepthook(etype, evalue, tb) # Now also do IPython's traceback ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset) # now register the function with IPython # note that we include _AstLogIPYExc so `disable_exception_logging` # knows that it's disabling the right thing ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler) # and set self._excepthook_orig to a no-op self._excepthook_orig = lambda etype, evalue, tb: None def disable_exception_logging(self): """ Disable logging of exceptions Once called, any uncaught exceptions will no longer be emitted by this logger. This can be re-enabled with ``enable_exception_logging``. """ try: ip = get_ipython() except NameError: ip = None if not self.exception_logging_enabled(): raise LoggingError("Exception logging has not been enabled") if ip is None: # standard python interpreter if sys.excepthook != self._excepthook: raise LoggingError( "Cannot disable exception logging: " "sys.excepthook was not set by this logger, " "or has been overridden" ) sys.excepthook = self._excepthook_orig self._excepthook_orig = None else: # IPython has its own way of dealing with exceptions ip.set_custom_exc(tuple(), None) def enable_color(self): """ Enable colorized output """ _conf.use_color = True def disable_color(self): """ Disable colorized output """ _conf.use_color = False @contextmanager def log_to_file(self, filename, filter_level=None, filter_origin=None): """ Context manager to temporarily log messages to a file. Parameters ---------- filename : str The file to log messages to. 
filter_level : str If set, any log messages less important than ``filter_level`` will not be output to the file. Note that this is in addition to the top-level filtering for the logger, so if the logger has level 'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG`` will have no effect, since these messages are already filtered out. filter_origin : str If set, only log messages with an origin starting with ``filter_origin`` will be output to the file. Notes ----- By default, the logger already outputs log messages to a file set in the Astropy configuration file. Using this context manager does not stop log messages from being output to that file, nor does it stop log messages from being printed to standard output. Examples -------- The context manager is used as:: with logger.log_to_file('myfile.log'): # your code here """ encoding = conf.log_file_encoding if conf.log_file_encoding else None fh = logging.FileHandler(filename, encoding=encoding) if filter_level is not None: fh.setLevel(filter_level) if filter_origin is not None: fh.addFilter(FilterOrigin(filter_origin)) f = logging.Formatter(conf.log_file_format) fh.setFormatter(f) self.addHandler(fh) yield fh.close() self.removeHandler(fh) @contextmanager def log_to_list(self, filter_level=None, filter_origin=None): """ Context manager to temporarily log messages to a list. Parameters ---------- filename : str The file to log messages to. filter_level : str If set, any log messages less important than ``filter_level`` will not be output to the file. Note that this is in addition to the top-level filtering for the logger, so if the logger has level 'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG`` will have no effect, since these messages are already filtered out. filter_origin : str If set, only log messages with an origin starting with ``filter_origin`` will be output to the file. Notes ----- Using this context manager does not stop log messages from being output to standard output. Examples -------- The context manager is used as:: with logger.log_to_list() as log_list: # your code here """ lh = ListHandler() if filter_level is not None: lh.setLevel(filter_level) if filter_origin is not None: lh.addFilter(FilterOrigin(filter_origin)) self.addHandler(lh) yield lh.log_list self.removeHandler(lh) def _set_defaults(self): """ Reset logger to its initial state """ # Reset any previously installed hooks if self.warnings_logging_enabled(): self.disable_warnings_logging() if self.exception_logging_enabled(): self.disable_exception_logging() # Remove all previous handlers for handler in self.handlers[:]: self.removeHandler(handler) # Set levels self.setLevel(conf.log_level) # Set up the stdout handler sh = StreamHandler() self.addHandler(sh) # Set up the main log file handler if requested (but this might fail if # configuration directory or log file is not writeable). 
if conf.log_to_file: log_file_path = conf.log_file_path # "None" as a string because it comes from config try: _ASTROPY_TEST_ testing_mode = True except NameError: testing_mode = False try: if log_file_path == "" or testing_mode: log_file_path = os.path.join( _config.get_config_dir("astropy"), "astropy.log" ) else: log_file_path = os.path.expanduser(log_file_path) encoding = conf.log_file_encoding if conf.log_file_encoding else None fh = logging.FileHandler(log_file_path, encoding=encoding) except OSError as e: warnings.warn( f"log file {log_file_path!r} could not be opened for writing:" f" {str(e)}", RuntimeWarning, ) else: formatter = logging.Formatter(conf.log_file_format) fh.setFormatter(formatter) fh.setLevel(conf.log_file_level) self.addHandler(fh) if conf.log_warnings: self.enable_warnings_logging() if conf.log_exceptions: self.enable_exception_logging() class StreamHandler(logging.StreamHandler): """ A specialized StreamHandler that logs INFO and DEBUG messages to stdout, and all other messages to stderr. Also provides coloring of the output, if enabled in the parent logger. """ def emit(self, record): """ The formatter for stderr """ if record.levelno <= logging.INFO: stream = sys.stdout else: stream = sys.stderr if record.levelno < logging.DEBUG or not _conf.use_color: print(record.levelname, end="", file=stream) else: # Import utils.console only if necessary and at the latest because # the import takes a significant time [#4649] from .utils.console import color_print if record.levelno < logging.INFO: color_print(record.levelname, "magenta", end="", file=stream) elif record.levelno < logging.WARN: color_print(record.levelname, "green", end="", file=stream) elif record.levelno < logging.ERROR: color_print(record.levelname, "brown", end="", file=stream) else: color_print(record.levelname, "red", end="", file=stream) record.message = f"{record.msg} [{record.origin:s}]" print(": " + record.message, file=stream) class FilterOrigin: """A filter for the record origin""" def __init__(self, origin): self.origin = origin def filter(self, record): return record.origin.startswith(self.origin) class ListHandler(logging.Handler): """A handler that can be used to capture the records in a list""" def __init__(self, filter_level=None, filter_origin=None): logging.Handler.__init__(self) self.log_list = [] def emit(self, record): self.log_list.append(record)
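# --- Usage sketch (not part of the original module) -------------------------
# A brief illustration of the context managers defined above, using the
# ``log`` instance that ``_init_log()`` creates at import time.
from astropy import log

with log.log_to_list() as records:
    log.info("computing something")
    log.warning("something looks off")

for rec in records:
    # each entry is a logging.LogRecord carrying the extra ``origin`` attribute
    print(rec.levelname, rec.origin, rec.msg)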
# Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default. Some values are defined in # the global Astropy configuration which is loaded here before anything else. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # IMPORTANT: the above commented section was generated by sphinx-quickstart, but # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left # commented out with this explanation to make it clear why this should not be # done. If the sys.path entry above is added, when the astropy.sphinx.conf # import occurs, it will import the *source* version of astropy instead of the # version installed (if invoked as "make html" or directly with sphinx), or the # version in the build directory. # Thus, any C-extensions that are needed to build the documentation will *not* # be accessible, and the documentation will not build correctly. # See sphinx_astropy.conf for which values are set there. import configparser import doctest import os import sys from datetime import datetime from importlib import metadata from packaging.requirements import Requirement from packaging.specifiers import SpecifierSet # -- Check for missing dependencies ------------------------------------------- missing_requirements = {} for line in metadata.requires("astropy"): if 'extra == "docs"' in line: req = Requirement(line.split(";")[0]) req_package = req.name.lower() req_specifier = str(req.specifier) try: version = metadata.version(req_package) except metadata.PackageNotFoundError: missing_requirements[req_package] = req_specifier if version not in SpecifierSet(req_specifier, prereleases=True): missing_requirements[req_package] = req_specifier if missing_requirements: print( "The following packages could not be found and are required to " "build the documentation:" ) for key, val in missing_requirements.items(): print(f" * {key} {val}") print('Please install the "docs" requirements.') sys.exit(1) from sphinx_astropy.conf.v1 import * # noqa: E402 from sphinx_astropy.conf.v1 import ( # noqa: E402 numpydoc_xref_aliases, numpydoc_xref_astropy_aliases, numpydoc_xref_ignore, rst_epilog, ) # -- Plot configuration ------------------------------------------------------- plot_rcparams = {} plot_rcparams["figure.figsize"] = (6, 6) plot_rcparams["savefig.facecolor"] = "none" plot_rcparams["savefig.bbox"] = "tight" plot_rcparams["axes.labelsize"] = "large" plot_rcparams["figure.subplot.hspace"] = 0.5 plot_apply_rcparams = True plot_html_show_source_link = False plot_formats = ["png", "svg", "pdf"] # Don't use the default - which includes a numpy and matplotlib import plot_pre_code = "" # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.7" # To perform a Sphinx version check that needs to be more specific than # major.minor, call `check_sphinx_version("X.Y.Z")` here. 
check_sphinx_version("1.2.1") # noqa: F405 # The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for # the benefit of other packages who want to refer to objects in the # astropy core. However, we don't want to cyclically reference astropy in its # own build so we remove it here. del intersphinx_mapping["astropy"] # noqa: F405 # add any custom intersphinx for astropy # fmt: off intersphinx_mapping["astropy-dev"] = ("https://docs.astropy.org/en/latest/", None) # noqa: F405 intersphinx_mapping["pyerfa"] = ("https://pyerfa.readthedocs.io/en/stable/", None) # noqa: F405 intersphinx_mapping["pytest"] = ("https://docs.pytest.org/en/stable/", None) # noqa: F405 intersphinx_mapping["ipython"] = ("https://ipython.readthedocs.io/en/stable/", None) # noqa: F405 intersphinx_mapping["pandas"] = ("https://pandas.pydata.org/pandas-docs/stable/", None) # noqa: F405 intersphinx_mapping["sphinx_automodapi"] = ("https://sphinx-automodapi.readthedocs.io/en/stable/", None) # noqa: F405 intersphinx_mapping["packagetemplate"] = ("https://docs.astropy.org/projects/package-template/en/latest/", None) # noqa: F405 intersphinx_mapping["h5py"] = ("https://docs.h5py.org/en/stable/", None) # noqa: F405 intersphinx_mapping["asdf-astropy"] = ("https://asdf-astropy.readthedocs.io/en/latest/", None) # noqa: F405 intersphinx_mapping["fsspec"] = ("https://filesystem-spec.readthedocs.io/en/latest/", None) # noqa: F405 # fmt: on # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns.append("_templates") # noqa: F405 exclude_patterns.append("changes") # noqa: F405 exclude_patterns.append("_pkgtemplate.rst") # noqa: F405 exclude_patterns.append( # noqa: F405 "**/*.inc.rst" ) # .inc.rst mean *include* files, don't have sphinx process them # Add any paths that contain templates here, relative to this directory. if "templates_path" not in locals(): # in case parent conf.py defines it templates_path = [] templates_path.append("_templates") extensions += ["sphinx_changelog"] # noqa: F405 # Grab minversion from setup.cfg setup_cfg = configparser.ConfigParser() setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg")) __minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "") project = "Astropy" min_versions = {} for line in metadata.requires("astropy"): req = Requirement(line.split(";")[0]) min_versions[req.name.lower()] = str(req.specifier) # This is added to the end of RST files - a good place to put substitutions to # be used globally. with open("common_links.txt") as cl: rst_epilog += cl.read().format( minimum_python=__minimum_python_version__, **min_versions ) # Manually register doctest options since matplotlib 3.5 messed up allowing them # from pytest-doctestplus IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT") REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA") FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP") # Whether to create cross-references for the parameter types in the # Parameters, Other Parameters, Returns and Yields sections of the docstring. numpydoc_xref_param_type = True # Words not to cross-reference. Most likely, these are common words used in # parameter type descriptions that may be confused for classes of the same # name. The base set comes from sphinx-astropy. We add more here. numpydoc_xref_ignore.update( { "mixin", "Any", # aka something that would be annotated with `typing.Any` # needed in subclassing numpy # TODO! revisit "Arguments", "Path", # TODO! 
not need to ignore. "flag", "bits", } ) # Mappings to fully qualified paths (or correct ReST references) for the # aliases/shortcuts used when specifying the types of parameters. # Numpy provides some defaults # https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94 # and a base set comes from sphinx-astropy. # so here we mostly need to define Astropy-specific x-refs numpydoc_xref_aliases.update( { # python & adjacent "Any": "`~typing.Any`", "file-like": ":term:`python:file-like object`", "file": ":term:`python:file object`", "path-like": ":term:`python:path-like object`", "module": ":term:`python:module`", "buffer-like": ":term:buffer-like", "hashable": ":term:`python:hashable`", # for matplotlib "color": ":term:`color`", # for numpy "ints": ":class:`python:int`", # for astropy "number": ":term:`number`", "Representation": ":class:`~astropy.coordinates.BaseRepresentation`", "writable": ":term:`writable file-like object`", "readable": ":term:`readable file-like object`", "BaseHDU": ":doc:`HDU </io/fits/api/hdus>`", } ) # Add from sphinx-astropy 1) glossary aliases 2) physical types. numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases) # Turn off table of contents entries for functions and classes toc_object_entries = False # -- Project information ------------------------------------------------------ author = "The Astropy Developers" copyright = f"2011–{datetime.utcnow().year}, " + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The full version, including alpha/beta/rc tags. release = metadata.version(project) # The short X.Y version. version = ".".join(release.split(".")[:2]) # Only include dev docs in dev version. dev = "dev" in release if not dev: exclude_patterns.append("development/*") # noqa: F405 exclude_patterns.append("testhelpers.rst") # noqa: F405 # -- Options for the module index --------------------------------------------- modindex_common_prefix = ["astropy."] # -- Options for HTML output --------------------------------------------------- # A NOTE ON HTML THEMES # # The global astropy configuration uses a custom theme, # 'bootstrap-astropy', which is installed along with astropy. The # theme has options for controlling the text of the logo in the upper # left corner. This is how you would specify the options in order to # override the theme defaults (The following options *are* the # defaults, so we do not actually need to set them here.) # html_theme_options = { # 'logotext1': 'astro', # white, semi-bold # 'logotext2': 'py', # orange, light # 'logotext3': ':docs' # white, light # } # A different theme can be used, or other parts of this theme can be # modified, by overriding some of the variables set in the global # configuration. The variables set in the global configuration are # listed below, commented out. # Add any paths that contain custom themes here, relative to this directory. # To use a different custom theme, add the directory containing the theme. # html_theme_path = [] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. To override the custom theme, set this to the # name of a builtin theme or the name of a custom theme in html_theme_path. # html_theme = None # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = '' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = f"{project} v{release}" # Output file base name for HTML help builder. htmlhelp_basename = project + "doc" # A dictionary of values to pass into the template engine’s context for all pages. html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev} # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ("index", project + ".tex", project + " Documentation", author, "manual") ] latex_logo = "_static/astropy_logo.pdf" # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [("index", project.lower(), project + " Documentation", [author], 1)] # Setting this URL is requited by sphinx-astropy github_issues_url = "https://github.com/astropy/astropy/issues/" edit_on_github_branch = "main" # Enable nitpicky mode - which ensures that all references in the docs # resolve. nitpicky = True # This is not used. See docs/nitpick-exceptions file for the actual listing. nitpick_ignore = [] for line in open("nitpick-exceptions"): if line.strip() == "" or line.startswith("#"): continue dtype, target = line.split(None, 1) target = target.strip() nitpick_ignore.append((dtype, target)) # -- Options for the Sphinx gallery ------------------------------------------- try: import warnings import sphinx_gallery extensions += ["sphinx_gallery.gen_gallery"] sphinx_gallery_conf = { "backreferences_dir": "generated/modules", # path to store the module using example template "filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_" "examples_dirs": f"..{os.sep}examples", # path to the examples scripts "gallery_dirs": "generated/examples", # path to save gallery generated examples "reference_url": { "astropy": None, "matplotlib": "https://matplotlib.org/stable/", # The stable numpy search js isn't loadable at the moment (2022-12-07) # It seems to be valid js but it's not valid json so sphinx wont load it. "numpy": "https://numpy.org/devdocs/", }, "abort_on_example_error": True, } # Filter out backend-related warnings as described in # https://github.com/sphinx-gallery/sphinx-gallery/pull/564 warnings.filterwarnings( "ignore", category=UserWarning, message=( "Matplotlib is currently using agg, which is a" " non-GUI backend, so cannot show the figure." 
), ) except ImportError: sphinx_gallery = None # -- Options for linkcheck output ------------------------------------------- linkcheck_retry = 5 linkcheck_ignore = [ "https://journals.aas.org/manuscript-preparation/", "https://maia.usno.navy.mil/", "https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer", "https://aa.usno.navy.mil/publications/docs/Circular_179.php", "http://data.astropy.org", "https://doi.org/10.1017/S0251107X00002406", # internal server error "https://doi.org/10.1017/pasa.2013.31", # internal server error "https://www.tandfonline.com/", # 403 Client Error: Forbidden "https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+", ] linkcheck_timeout = 180 linkcheck_anchors = False # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. html_extra_path = ["robots.txt"] def rstjinja(app, docname, source): """Render pages as a jinja template to hide/show dev docs.""" # Make sure we're outputting HTML if app.builder.format != "html": return files_to_render = ["index", "install"] if docname in files_to_render: print(f"Jinja rendering {docname}") rendered = app.builder.templates.render_string( source[0], app.config.html_context ) source[0] = rendered def resolve_astropy_and_dev_reference(app, env, node, contnode): """ Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases. Documentation links in astropy can be set up as intersphinx links so that affiliate packages do not have to override the docstrings when building the docs. If we are building the development docs it is a local ref targeting the label ``astropy-dev:<label>``, but for stable docs it should be an intersphinx resolution to the development docs. See https://github.com/astropy/astropy/issues/11366 """ # should the node be processed? reftarget = node.get("reftarget") # str or None if str(reftarget).startswith("astropy:"): # This allows Astropy to use intersphinx links to itself and have # them resolve to local links. Downstream packages will see intersphinx. # TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented. process, replace = True, "astropy:" elif dev and str(reftarget).startswith("astropy-dev:"): process, replace = True, "astropy-dev:" else: process, replace = False, "" # make link local if process: reftype = node.get("reftype") refdoc = node.get("refdoc", app.env.docname) # convert astropy intersphinx targets to local links. # there are a few types of intersphinx link patters, as described in # https://docs.readthedocs.io/en/stable/guides/intersphinx.html reftarget = reftarget.replace(replace, "") if reftype == "doc": # also need to replace the doc link node.replace_attr("reftarget", reftarget) # Delegate to the ref node's original domain/target (typically :ref:) try: domain = app.env.domains[node["refdomain"]] return domain.resolve_xref( app.env, refdoc, app.builder, reftype, reftarget, node, contnode ) except Exception: pass # Otherwise return None which should delegate to intersphinx def setup(app): if sphinx_gallery is None: msg = ( "The sphinx_gallery extension is not installed, so the " "gallery will not be built. You will probably see " "additional warnings about undefined references due " "to this." 
) try: app.warn(msg) except AttributeError: # Sphinx 1.6+ from sphinx.util import logging logger = logging.getLogger(__name__) logger.warning(msg) # Generate the page from Jinja template app.connect("source-read", rstjinja) # Set this to higher priority than intersphinx; this way when building # dev docs astropy-dev: targets will go to the local docs instead of the # intersphinx mapping app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
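# --- Illustrative sketch (not part of the original conf.py) -----------------
# The ``nitpick-exceptions`` file read above is expected to hold one
# "<reference type> <target>" pair per non-comment line. The entries below
# are hypothetical and only demonstrate the format the parsing loop assumes.
_example_lines = [
    "# comments and blank lines are skipped",
    "py:class numpy.ma.core.MaskedArray",
    "py:obj astropy.table.Table.meta",
]
_pairs = []
for _line in _example_lines:
    if _line.strip() == "" or _line.startswith("#"):
        continue
    _dtype, _target = _line.split(None, 1)
    _pairs.append((_dtype, _target.strip()))
print(_pairs)  # [('py:class', 'numpy.ma.core.MaskedArray'), ('py:obj', 'astropy.table.Table.meta')]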
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file needs to be included here to make sure commands such # as ``pytest docs/...`` works, since this # will ignore the conftest.py file at the root of the repository # and the one in astropy/conftest.py import os import tempfile import pytest # Make sure we use temporary directories for the config and cache # so that the tests are insensitive to local configuration. os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config") os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache") os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy")) os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy")) # Note that we don't need to change the environment variables back or remove # them after testing, because they are only changed for the duration of the # Python process, and this configuration only matters if running pytest # directly, not from e.g. an IPython session. @pytest.fixture(autouse=True) def _docdir(request): """Run doctests in isolated tmp_path so outputs do not end up in repo""" # Trigger ONLY for doctestplus doctest_plugin = request.config.pluginmanager.getplugin("doctestplus") if isinstance(request.node.parent, doctest_plugin._doctest_textfile_item_cls): # Don't apply this fixture to io.rst. It reads files and doesn't write. # Implementation from https://github.com/pytest-dev/pytest/discussions/10437 if "io.rst" not in request.node.name: old_cwd = os.getcwd() tmp_path = request.getfixturevalue("tmp_path") os.chdir(tmp_path) yield os.chdir(old_cwd) else: yield else: yield
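# --- Illustrative note (not part of the original conftest) ------------------
# With the fixture above active, a hypothetical doctest in a narrative .rst
# file (other than io.rst) that writes a file would do so inside the
# throwaway tmp_path rather than the repository checkout, e.g.:
#
#     >>> with open("scratch.txt", "w") as f:
#     ...     _ = f.write("doctest output stays out of the repo")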
import os import shutil import sys import erfa # noqa: F401 import matplotlib import pytest import astropy # noqa: F401 if len(sys.argv) == 3 and sys.argv[1] == "--astropy-root": ROOT = sys.argv[2] else: # Make sure we don't allow any arguments to be passed - some tests call # sys.executable which becomes this script when producing a pyinstaller # bundle, but we should just error in this case since this is not the # regular Python interpreter. if len(sys.argv) > 1: print("Extra arguments passed, exiting early") sys.exit(1) for root, dirnames, files in os.walk(os.path.join(ROOT, "astropy")): # NOTE: we can't simply use # test_root = root.replace('astropy', 'astropy_tests') # as we only want to change the one which is for the module, so instead # we search for the last occurrence and replace that. pos = root.rfind("astropy") test_root = root[:pos] + "astropy_tests" + root[pos + 7 :] # Copy over the astropy 'tests' directories and their contents for dirname in dirnames: final_dir = os.path.relpath(os.path.join(test_root, dirname), ROOT) # We only copy over 'tests' directories, but not astropy/tests (only # astropy/tests/tests) since that is not just a directory with tests. if dirname == "tests" and not root.endswith("astropy"): shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True) else: # Create empty __init__.py files so that 'astropy_tests' still # behaves like a single package, otherwise pytest gets confused # by the different conftest.py files. init_filename = os.path.join(final_dir, "__init__.py") if not os.path.exists(os.path.join(final_dir, "__init__.py")): os.makedirs(final_dir, exist_ok=True) with open(os.path.join(final_dir, "__init__.py"), "w") as f: f.write("#") # Copy over all conftest.py files for file in files: if file == "conftest.py": final_file = os.path.relpath(os.path.join(test_root, file), ROOT) shutil.copy2(os.path.join(root, file), final_file) # Add the top-level __init__.py file with open(os.path.join("astropy_tests", "__init__.py"), "w") as f: f.write("#") # Remove test file that tries to import all sub-packages at collection time os.remove( os.path.join("astropy_tests", "utils", "iers", "tests", "test_leap_second.py") ) # Remove convolution tests for now as there are issues with the loading of the C extension. # FIXME: one way to fix this would be to migrate the convolution C extension away from using # ctypes and using the regular extension mechanism instead. shutil.rmtree(os.path.join("astropy_tests", "convolution")) os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_convolution.py")) os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_core.py")) os.remove(os.path.join("astropy_tests", "visualization", "tests", "test_lupton_rgb.py")) # FIXME: PIL minversion check does not work os.remove( os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_misc.py") ) os.remove( os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_wcsapi.py") ) # FIXME: The following tests rely on the fully qualified name of classes which # don't seem to be the same. 
os.remove(os.path.join("astropy_tests", "table", "mixins", "tests", "test_registry.py")) # Copy the top-level conftest.py shutil.copy2( os.path.join(ROOT, "astropy", "conftest.py"), os.path.join("astropy_tests", "conftest.py"), ) # matplotlib hook in pyinstaller 5.0 and later no longer collects every backend, see # https://github.com/pyinstaller/pyinstaller/issues/6760 matplotlib.use("svg") # We skip a few tests, which are generally ones that rely on explicitly # checking the name of the current module (which ends up starting with # astropy_tests rather than astropy). SKIP_TESTS = [ "test_exception_logging_origin", "test_log", "test_configitem", "test_config_noastropy_fallback", "test_no_home", "test_path", "test_rename_path", "test_data_name_third_party_package", "test_pkg_finder", "test_wcsapi_extension", "test_find_current_module_bundle", "test_minversion", "test_imports", "test_generate_config", "test_generate_config2", "test_create_config_file", "test_download_parallel_fills_cache", ] # Run the tests! sys.exit( pytest.main( ["astropy_tests", "-k " + " and ".join("not " + test for test in SKIP_TESTS)], plugins=[ "pytest_astropy.plugin", "pytest_doctestplus.plugin", "pytest_openfiles.plugin", "pytest_remotedata.plugin", "pytest_astropy_header.display", ], ) )
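# --- Illustrative sketch (not part of the original script) ------------------
# What the ``-k`` expression built above looks like for a short, hypothetical
# skip list; pytest then deselects any test whose name matches an entry.
_example_skips = ["test_log", "test_no_home"]
print("-k " + " and ".join("not " + test for test in _example_skips))
# prints: -k not test_log and not test_no_home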
""" ======================== Title of Example ======================== This example <verb> <active tense> <does something>. The example uses <packages> to <do something> and <other package> to <do other thing>. Include links to referenced packages like this: `astropy.io.fits` to show the astropy.io.fits or like this `~astropy.io.fits`to show just 'fits' *By: <names>* *License: BSD* """ ############################################################################## # Make print work the same in all versions of Python, set up numpy, # matplotlib, and use a nicer set of plot parameters: import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) # uncomment if including figures: # import matplotlib.pyplot as plt # from astropy.visualization import astropy_mpl_style # plt.style.use(astropy_mpl_style) ############################################################################## # This code block is executed, although it produces no output. Lines starting # with a simple hash are code comment and get treated as part of the code # block. To include this new comment string we started the new block with a # long line of hashes. # # The sphinx-gallery parser will assume everything after this splitter and that # continues to start with a **comment hash and space** (respecting code style) # is text that has to be rendered in # html format. Keep in mind to always keep your comments always together by # comment hashes. That means to break a paragraph you still need to comment # that line break. # # In this example the next block of code produces some plotable data. Code is # executed, figure is saved and then code is presented next, followed by the # inlined figure. x = np.linspace(-np.pi, np.pi, 300) xx, yy = np.meshgrid(x, x) z = np.cos(xx) + np.cos(yy) plt.figure() plt.imshow(z) plt.colorbar() plt.xlabel('$x$') plt.ylabel('$y$') ########################################################################### # Again it is possible to continue the discussion with a new Python string. This # time to introduce the next code block generates 2 separate figures. plt.figure() plt.imshow(z, cmap=plt.cm.get_cmap('hot')) plt.figure() plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none') ########################################################################## # There's some subtle differences between rendered html rendered comment # strings and code comment strings which I'll demonstrate below. (Some of this # only makes sense if you look at the # :download:`raw Python script <plot_notebook.py>`) # # Comments in comment blocks remain nested in the text. def dummy(): """Dummy function to make sure docstrings don't get rendered as text""" pass # Code comments not preceded by the hash splitter are left in code blocks. string = """ Triple-quoted string which tries to break parser but doesn't. """ ############################################################################ # Output of the script is captured: print('Some output from Python') ############################################################################ # Finally, I'll call ``show`` at the end just so someone running the Python # code directly will see the plots; this is not necessary for creating the docs plt.show()
""" ======================================================================== Transforming positions and velocities to and from a Galactocentric frame ======================================================================== This document shows a few examples of how to use and customize the `~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky positions, distance, proper motions, and radial velocities to a Galactocentric, Cartesian frame, and the same in reverse. The main configurable parameters of the `~astropy.coordinates.Galactocentric` frame control the position and velocity of the solar system barycenter within the Galaxy. These are specified by setting the ICRS coordinates of the Galactic center, the distance to the Galactic center (the sun-galactic center line is always assumed to be the x-axis of the Galactocentric frame), and the Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first demonstrate how to customize these values, then show how to set the solar motion instead by inputting the proper motion of Sgr A*. Note that, for brevity, we may refer to the solar system barycenter as just "the sun" in the examples below. *By: Adrian Price-Whelan* *License: BSD* """ ############################################################################## # Make `print` work the same in all versions of Python, set up numpy, # matplotlib, and use a nicer set of plot parameters: import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) ############################################################################## # Import the necessary astropy subpackages import astropy.coordinates as coord import astropy.units as u ############################################################################## # Let's first define a barycentric coordinate and velocity in the ICRS frame. # We'll use the data for the star HD 39881 from the `Simbad # <https://simbad.u-strasbg.fr/simbad/>`_ database: c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree, distance=(37.59*u.mas).to(u.pc, u.parallax()), pm_ra_cosdec=372.72*u.mas/u.yr, pm_dec=-483.69*u.mas/u.yr, radial_velocity=0.37*u.km/u.s, frame='icrs') ############################################################################## # This is a high proper-motion star; suppose we'd like to transform its position # and velocity to a Galactocentric frame to see if it has a large 3D velocity # as well. To use the Astropy default solar position and motion parameters, we # can simply do: gc1 = c1.transform_to(coord.Galactocentric) ############################################################################## # From here, we can access the components of the resulting # `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian # velocity components: print(gc1.v_x, gc1.v_y, gc1.v_z) ############################################################################## # The default parameters for the `~astropy.coordinates.Galactocentric` frame # are detailed in the linked documentation, but we can modify the most commonly # changes values using the keywords ``galcen_distance``, ``galcen_v_sun``, and # ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector # of the sun, and the height of the sun above the Galactic midplane, # respectively. The velocity of the sun can be specified as an # `~astropy.units.Quantity` object with velocity units and is interpreted as a # Cartesian velocity, as in the example below. 
Note that, as with the positions, # the Galactocentric frame is a right-handed system (i.e., the Sun is at negative # x values) so ``v_x`` is opposite of the Galactocentric radial velocity: v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz] gc_frame = coord.Galactocentric( galcen_distance=8*u.kpc, galcen_v_sun=v_sun, z_sun=0*u.pc) ############################################################################## # We can then transform to this frame instead, with our custom parameters: gc2 = c1.transform_to(gc_frame) print(gc2.v_x, gc2.v_y, gc2.v_z) ############################################################################## # It's sometimes useful to specify the solar motion using the `proper motion # of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian # velocity components. With an assumed distance, we can convert proper motion # components to Cartesian velocity components using `astropy.units`: galcen_distance = 8*u.kpc pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004 vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles()) ############################################################################## # We still have to assume a line-of-sight velocity for the Galactic center, # which we will again take to be 11 km/s: vx = 11.1 * u.km/u.s v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance, galcen_v_sun=v_sun2, z_sun=0*u.pc) gc3 = c1.transform_to(gc_frame2) print(gc3.v_x, gc3.v_y, gc3.v_z) ############################################################################## # The transformations also work in the opposite direction. This can be useful # for transforming simulated or theoretical data to observable quantities. As # an example, we'll generate 4 theoretical circular orbits at different # Galactocentric radii with the same circular velocity, and transform them to # Heliocentric coordinates: ring_distances = np.arange(10, 25+1, 5) * u.kpc circ_velocity = 220 * u.km/u.s phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths ring_rep = coord.CylindricalRepresentation( rho=ring_distances[:,np.newaxis], phi=phi_grid[np.newaxis], z=np.zeros_like(ring_distances)[:,np.newaxis]) angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr, u.dimensionless_angles()) ring_dif = coord.CylindricalDifferential( d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s, d_phi=angular_velocity[:,np.newaxis], d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s ) ring_rep = ring_rep.with_differentials(ring_dif) gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric) ############################################################################## # First, let's visualize the geometry in Galactocentric coordinates. 
Here are # the positions and velocities of the rings; note that in the velocity plot, # the velocities of the 4 rings are identical and thus overlaid under the same # curve: fig,axes = plt.subplots(1, 2, figsize=(12,6)) # Positions axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3) axes[0].text(-8., 0, r'$\odot$', fontsize=20) axes[0].set_xlim(-30, 30) axes[0].set_ylim(-30, 30) axes[0].set_xlabel('$x$ [kpc]') axes[0].set_ylabel('$y$ [kpc]') # Velocities axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3) axes[1].set_xlim(-250, 250) axes[1].set_ylim(-250, 250) axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]") axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]") fig.tight_layout() plt.show() ############################################################################## # Now we can transform to Galactic coordinates and visualize the rings in # observable coordinates: gal_rings = gc_rings.transform_to(coord.Galactic) fig, ax = plt.subplots(1, 1, figsize=(8, 6)) for i in range(len(ring_distances)): ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value, label=str(ring_distances[i]), marker='None', linewidth=3) ax.set_xlim(360, 0) ax.set_xlabel('$l$ [deg]') ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]') ax.legend() plt.show()
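##############################################################################
# As a quick consistency check (a minimal round-trip sketch using the objects
# defined above), the transformation can also be run in reverse: transforming
# ``gc1`` back to ICRS should recover the input position and velocity of
# HD 39881 to within numerical precision:

icrs_back = gc1.transform_to(coord.ICRS())
print(icrs_back.ra, icrs_back.dec)
print(icrs_back.radial_velocity)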
""" =================================================================== Determining and plotting the altitude/azimuth of a celestial object =================================================================== This example demonstrates coordinate transformations and the creation of visibility curves to assist with observing run planning. In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33. The altitude-azimuth coordinates are then found using `astropy.coordinates.EarthLocation` and `astropy.time.Time` objects. This example is meant to demonstrate the capabilities of the `astropy.coordinates` package. For more convenient and/or complex observation planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_ package. *By: Erik Tollerud, Kelle Cruz* *License: BSD* """ ############################################################################## # Let's suppose you are planning to visit picturesque Bear Mountain State Park # in New York, USA. You're bringing your telescope with you (of course), and # someone told you M33 is a great target to observe there. You happen to know # you're free at 11:00 pm local time, and you want to know if it will be up. # Astropy can answer that. # # Import numpy and matplotlib. For the latter, use a nicer set of plot # parameters and set up support for plotting/converting quantities. import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style, quantity_support plt.style.use(astropy_mpl_style) quantity_support() ############################################################################## # Import the packages necessary for finding coordinates and making # coordinate transformations import astropy.units as u from astropy.coordinates import AltAz, EarthLocation, SkyCoord from astropy.time import Time ############################################################################## # `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object # names and retrieve coordinates. # # Get the coordinates of M33: m33 = SkyCoord.from_name('M33') ############################################################################## # Use `astropy.coordinates.EarthLocation` to provide the location of Bear # Mountain and set the time to 11pm EDT on 2012 July 12: bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m) utcoffset = -4*u.hour # Eastern Daylight Time time = Time('2012-7-12 23:00:00') - utcoffset ############################################################################## # `astropy.coordinates.EarthLocation.get_site_names` and # `~astropy.coordinates.EarthLocation.get_site_names` can be used to get # locations of major observatories. # # Use `astropy.coordinates` to find the Alt, Az coordinates of M33 at as # observed from Bear Mountain at 11pm on 2012 July 12. m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain)) print(f"M33's Altitude = {m33altaz.alt:.2}") ############################################################################## # This is helpful since it turns out M33 is barely above the horizon at this # time. It's more informative to find M33's airmass over the course of # the night. 
# # Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm # and 7am EDT: midnight = Time('2012-7-13 00:00:00') - utcoffset delta_midnight = np.linspace(-2, 10, 100)*u.hour frame_July13night = AltAz(obstime=midnight+delta_midnight, location=bear_mountain) m33altazs_July13night = m33.transform_to(frame_July13night) ############################################################################## # convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute: m33airmasss_July13night = m33altazs_July13night.secz ############################################################################## # Plot the airmass as a function of time: plt.plot(delta_midnight, m33airmasss_July13night) plt.xlim(-2, 10) plt.ylim(1, 4) plt.xlabel('Hours from EDT Midnight') plt.ylabel('Airmass [Sec(z)]') plt.show() ############################################################################## # Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000 # evenly spaced times between noon on July 12 and noon on July 13: from astropy.coordinates import get_sun delta_midnight = np.linspace(-12, 12, 1000)*u.hour times_July12_to_13 = midnight + delta_midnight frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain) sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13) ############################################################################## # Do the same with `~astropy.coordinates.get_moon` to find when the moon is # up. Be aware that this will need to download a 10MB file from the internet # to get a precise location of the moon. from astropy.coordinates import get_moon moon_July12_to_13 = get_moon(times_July12_to_13) moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13) ############################################################################## # Find the alt,az coordinates of M33 at those same times: m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13) ############################################################################## # Make a beautiful figure illustrating nighttime and the altitudes of M33 and # the Sun over that time: plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun') plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon') plt.scatter(delta_midnight, m33altazs_July12_to_13.alt, c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8, cmap='viridis') plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg, sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0) plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg, sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0) plt.colorbar().set_label('Azimuth [deg]') plt.legend(loc='upper left') plt.xlim(-12*u.hour, 12*u.hour) plt.xticks((np.arange(13)*2-12)*u.hour) plt.ylim(0*u.deg, 90*u.deg) plt.xlabel('Hours from EDT Midnight') plt.ylabel('Altitude [deg]') plt.show()
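##############################################################################
# As a small extension (a minimal sketch using the grid of times computed
# above), we can estimate when M33 is highest on this night by locating the
# maximum altitude along that grid:

best = np.argmax(m33altazs_July12_to_13.alt)
print(f"Maximum altitude {m33altazs_July12_to_13.alt[best]:.1f} "
      f"at {delta_midnight[best]:.1f} from EDT midnight")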
""" ================================================================ Convert a radial velocity to the Galactic Standard of Rest (GSR) ================================================================ Radial or line-of-sight velocities of sources are often reported in a Heliocentric or Solar-system barycentric reference frame. A common transformation incorporates the projection of the Sun's motion along the line-of-sight to the target, hence transforming it to a Galactic rest frame instead (sometimes referred to as the Galactic Standard of Rest, GSR). This transformation depends on the assumptions about the orientation of the Galactic frame relative to the bary- or Heliocentric frame. It also depends on the assumed solar velocity vector. Here we'll demonstrate how to perform this transformation using a sky position and barycentric radial-velocity. *By: Adrian Price-Whelan* *License: BSD* """ ################################################################################ # Import the required Astropy packages: import astropy.coordinates as coord import astropy.units as u ################################################################################ # Use the latest convention for the Galactocentric coordinates coord.galactocentric_frame_defaults.set('latest') ################################################################################ # For this example, let's work with the coordinates and barycentric radial # velocity of the star HD 155967, as obtained from # `Simbad <https://simbad.u-strasbg.fr/simbad/>`_: icrs = coord.SkyCoord(ra=258.58356362*u.deg, dec=14.55255619*u.deg, radial_velocity=-16.1*u.km/u.s, frame='icrs') ################################################################################ # We next need to decide on the velocity of the Sun in the assumed GSR frame. # We'll use the same velocity vector as used in the # `~astropy.coordinates.Galactocentric` frame, and convert it to a # `~astropy.coordinates.CartesianRepresentation` object using the # ``.to_cartesian()`` method of the # `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``: v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian() ################################################################################ # We now need to get a unit vector in the assumed Galactic frame from the sky # position in the ICRS frame above. We'll use this unit vector to project the # solar velocity onto the line-of-sight: gal = icrs.transform_to(coord.Galactic) cart_data = gal.data.to_cartesian() unit_vector = cart_data / cart_data.norm() ################################################################################ # Now we project the solar velocity using this unit vector: v_proj = v_sun.dot(unit_vector) ################################################################################ # Finally, we add the projection of the solar velocity to the radial velocity # to get a GSR radial velocity: rv_gsr = icrs.radial_velocity + v_proj print(rv_gsr) ################################################################################ # We could wrap this in a function so we can control the solar velocity and # re-use the above code: def rv_to_gsr(c, v_sun=None): """Transform a barycentric radial velocity to the Galactic Standard of Rest (GSR). The input radial velocity must be passed in as a Parameters ---------- c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance The radial velocity, associated with a sky coordinates, to be transformed. 
v_sun : `~astropy.units.Quantity`, optional The 3D velocity of the solar system barycenter in the GSR frame. Defaults to the same solar motion as in the `~astropy.coordinates.Galactocentric` frame. Returns ------- v_gsr : `~astropy.units.Quantity` The input radial velocity transformed to a GSR frame. """ if v_sun is None: v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian() gal = c.transform_to(coord.Galactic) cart_data = gal.data.to_cartesian() unit_vector = cart_data / cart_data.norm() v_proj = v_sun.dot(unit_vector) return c.radial_velocity + v_proj rv_gsr = rv_to_gsr(icrs) print(rv_gsr)
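################################################################################
# For example (a minimal sketch; the velocity components below are purely
# illustrative), a custom solar velocity can be passed in as a
# `~astropy.coordinates.CartesianRepresentation`:

v_sun_alt = coord.CartesianRepresentation([11.1, 244., 7.25] * u.km / u.s)
print(rv_to_gsr(icrs, v_sun=v_sun_alt))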
r""" ========================================================== Create a new coordinate class (for the Sagittarius stream) ========================================================== This document describes in detail how to subclass and define a custom spherical coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we will define a coordinate system defined by the plane of orbit of the Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr coordinate system is often referred to in terms of two angular coordinates, :math:`\Lambda,B`. To do this, we need to define a subclass of `~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the coordinate system angles in each of the supported representations. In this case we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and "Beta". Then we have to define the transformation from this coordinate system to some other built-in system. Here we will use Galactic coordinates, represented by the `~astropy.coordinates.Galactic` class. See Also -------- * The `gala package <http://gala.adrian.pw/>`_, which defines a number of Astropy coordinate frames for stellar stream coordinate systems. * Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms", https://arxiv.org/abs/astro-ph/0304198 * Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132 * David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/ *By: Adrian Price-Whelan, Erik Tollerud* *License: BSD* """ ############################################################################## # Make `print` work the same in all versions of Python, set up numpy, # matplotlib, and use a nicer set of plot parameters: import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) ############################################################################## # Import the packages necessary for coordinates import astropy.coordinates as coord import astropy.units as u from astropy.coordinates import frame_transform_graph from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix ############################################################################## # The first step is to create a new class, which we'll call # ``Sagittarius`` and make it a subclass of # `~astropy.coordinates.BaseCoordinateFrame`: class Sagittarius(coord.BaseCoordinateFrame): """ A Heliocentric spherical coordinate system defined by the orbit of the Sagittarius dwarf galaxy, as described in https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M and further explained in https://www.stsci.edu/~dlaw/Sgr/. Parameters ---------- representation : `~astropy.coordinates.BaseRepresentation` or None A representation object or None to have no data (or use the other keywords) Lambda : `~astropy.coordinates.Angle`, optional, must be keyword The longitude-like angle corresponding to Sagittarius' orbit. Beta : `~astropy.coordinates.Angle`, optional, must be keyword The latitude-like angle corresponding to Sagittarius' orbit. distance : `~astropy.units.Quantity`, optional, must be keyword The Distance for this object along the line-of-sight. 
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword The proper motion along the stream in ``Lambda`` (including the ``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given). pm_Beta : `~astropy.units.Quantity`, optional, must be keyword The proper motion in Declination for this object (``pm_ra_cosdec`` must also be given). radial_velocity : `~astropy.units.Quantity`, optional, keyword-only The radial velocity of this object. """ default_representation = coord.SphericalRepresentation default_differential = coord.SphericalCosLatDifferential frame_specific_representation_info = { coord.SphericalRepresentation: [ coord.RepresentationMapping('lon', 'Lambda'), coord.RepresentationMapping('lat', 'Beta'), coord.RepresentationMapping('distance', 'distance')] } ############################################################################## # Breaking this down line-by-line, we define the class as a subclass of # `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive # docstring. The final lines are class-level attributes that specify the # default representation for the data, default differential for the velocity # information, and mappings from the attribute names used by representation # objects to the names that are to be used by the ``Sagittarius`` frame. In this # case we override the names in the spherical representations but don't do # anything with other representations like cartesian or cylindrical. # # Next we have to define the transformation from this coordinate system to some # other built-in coordinate system; we will use Galactic coordinates. We can do # this by defining functions that return transformation matrices, or by simply # defining a function that accepts a coordinate and returns a new coordinate in # the new system. Because the transformation to the Sagittarius coordinate # system is just a spherical rotation from Galactic coordinates, we'll just # define a function that returns this matrix. We'll start by constructing the # transformation matrix using pre-determined Euler angles and the # ``rotation_matrix`` helper function: SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010) SGR_THETA = (90 - 13.46) * u.degree SGR_PSI = (180 + 14.111534) * u.degree # Generate the rotation matrix using the x-convention (see Goldstein) SGR_MATRIX = ( np.diag([1.,1.,-1.]) @ rotation_matrix(SGR_PSI, "z") @ rotation_matrix(SGR_THETA, "x") @ rotation_matrix(SGR_PHI, "z") ) ############################################################################## # Since we already constructed the transformation (rotation) matrix above, and # the inverse of a rotation matrix is just its transpose, the required # transformation functions are very simple: @frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius) def galactic_to_sgr(): """ Compute the transformation matrix from Galactic spherical to heliocentric Sgr coordinates. """ return SGR_MATRIX ############################################################################## # The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform, # coord.Galactic, Sagittarius)`` registers this function on the # ``frame_transform_graph`` as a coordinate transformation. Inside the function, # we simply return the previously defined rotation matrix. 
# # We then register the inverse transformation by using the transpose of the # rotation matrix (which is faster to compute than the inverse): @frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic) def sgr_to_galactic(): """ Compute the transformation matrix from heliocentric Sgr coordinates to spherical Galactic. """ return matrix_transpose(SGR_MATRIX) ############################################################################## # Now that we've registered these transformations between ``Sagittarius`` and # `~astropy.coordinates.Galactic`, we can transform between *any* coordinate # system and ``Sagittarius`` (as long as the other system has a path to # transform to `~astropy.coordinates.Galactic`). For example, to transform from # ICRS coordinates to ``Sagittarius``, we would do: icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs') sgr = icrs.transform_to(Sagittarius) print(sgr) ############################################################################## # Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this # case, a line along the ``Sagittarius`` x-y plane): sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian, Beta=np.zeros(128)*u.radian, frame='sagittarius') icrs = sgr.transform_to(coord.ICRS) print(icrs) ############################################################################## # As an example, we'll now plot the points in both coordinate systems: fig, axes = plt.subplots(2, 1, figsize=(8, 10), subplot_kw={'projection': 'aitoff'}) axes[0].set_title("Sagittarius") axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian, linestyle='none', marker='.') axes[1].set_title("ICRS") axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian, linestyle='none', marker='.') plt.show() ############################################################################## # This particular transformation is just a spherical rotation, which is a # special case of an Affine transformation with no vector offset. The # transformation of velocity components is therefore natively supported as # well: sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian, Beta=np.zeros(128)*u.radian, pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr, pm_Beta=np.zeros(128)*u.mas/u.yr, frame='sagittarius') icrs = sgr.transform_to(coord.ICRS) print(icrs) fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True) axes[0].set_title("Sagittarius") axes[0].plot(sgr.Lambda.degree, sgr.pm_Lambda_cosBeta.value, linestyle='none', marker='.') axes[0].set_xlabel(r"$\Lambda$ [deg]") axes[0].set_ylabel( fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]") axes[1].set_title("ICRS") axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value, linestyle='none', marker='.') axes[1].set_ylabel( fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]") axes[2].set_title("ICRS") axes[2].plot(icrs.ra.degree, icrs.pm_dec.value, linestyle='none', marker='.') axes[2].set_xlabel("RA [deg]") axes[2].set_ylabel( fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]") plt.show()
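##############################################################################
# As a final numerical check (a minimal sketch), we can confirm that the
# rotation matrix defined above is orthogonal, which is what justifies using
# its transpose as the inverse transformation:

print(np.allclose(SGR_MATRIX @ matrix_transpose(SGR_MATRIX), np.eye(3)))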
""" ===================================================== Create a multi-extension FITS (MEF) file from scratch ===================================================== This example demonstrates how to create a multi-extension FITS (MEF) file from scratch using `astropy.io.fits`. *By: Erik Bray* *License: BSD* """ import os from astropy.io import fits ############################################################################## # HDUList objects are used to hold all the HDUs in a FITS file. This # ``HDUList`` class is a subclass of Python's builtin `list` and can be # created from scratch. For example, to create a FITS file with # three extensions: new_hdul = fits.HDUList() new_hdul.append(fits.ImageHDU()) new_hdul.append(fits.ImageHDU()) ############################################################################## # Write out the new file to disk: new_hdul.writeto('test.fits') ############################################################################## # Alternatively, the HDU instances can be created first (or read from an # existing FITS file). # # Create a multi-extension FITS file with two empty IMAGE extensions (a # default PRIMARY HDU is prepended automatically if one is not specified; # we use ``overwrite=True`` to overwrite the file if it already exists): hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU() new_hdul = fits.HDUList([hdu1, hdu2]) new_hdul.writeto('test.fits', overwrite=True) ############################################################################## # Finally, we'll remove the file we created: os.remove('test.fits')
""" ================== Edit a FITS header ================== This example describes how to edit a value in a FITS header using `astropy.io.fits`. *By: Adrian Price-Whelan* *License: BSD* """ from astropy.io import fits from astropy.utils.data import get_pkg_data_filename ############################################################################## # Download a FITS file: fits_file = get_pkg_data_filename('tutorials/FITS-Header/input_file.fits') ############################################################################## # Look at contents of the FITS file fits.info(fits_file) ############################################################################## # Look at the headers of the two extensions: print("Before modifications:") print() print("Extension 0:") print(repr(fits.getheader(fits_file, 0))) print() print("Extension 1:") print(repr(fits.getheader(fits_file, 1))) ############################################################################## # `astropy.io.fits` provides an object-oriented interface for reading and # interacting with FITS files, but for small operations (like this example) it # is often easier to use the # `convenience functions <https://docs.astropy.org/en/latest/io/fits/index.html#convenience-functions>`_. # # To edit a single header value in the header for extension 0, use the # `~astropy.io.fits.setval()` function. For example, set the OBJECT keyword # to 'M31': fits.setval(fits_file, 'OBJECT', value='M31') ############################################################################## # With no extra arguments, this will modify the header for extension 0, but # this can be changed using the ``ext`` keyword argument. For example, we can # specify extension 1 instead: fits.setval(fits_file, 'OBJECT', value='M31', ext=1) ############################################################################## # This can also be used to create a new keyword-value pair ("card" in FITS # lingo): fits.setval(fits_file, 'ANEWKEY', value='some value') ############################################################################## # Again, this is useful for one-off modifications, but can be inefficient # for operations like editing multiple headers in the same file # because `~astropy.io.fits.setval()` loads the whole file each time it # is called. To make several modifications, it's better to load the file once: with fits.open(fits_file, 'update') as f: for hdu in f: hdu.header['OBJECT'] = 'CAT' print("After modifications:") print() print("Extension 0:") print(repr(fits.getheader(fits_file, 0))) print() print("Extension 1:") print(repr(fits.getheader(fits_file, 1)))
""" ======================================= Read and plot an image from a FITS file ======================================= This example opens an image stored in a FITS file and displays it to the screen. This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open the file, and `matplotlib.pyplot` to display the image. *By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz* *License: BSD* """ ############################################################################## # Set up matplotlib and use a nicer set of plot parameters import matplotlib.pyplot as plt from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) ############################################################################## # Download the example FITS files used by this example: from astropy.io import fits from astropy.utils.data import get_pkg_data_filename image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits') ############################################################################## # Use `astropy.io.fits.info()` to display the structure of the file: fits.info(image_file) ############################################################################## # Generally the image information is located in the Primary HDU, also known # as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image # data from this first extension using the keyword argument ``ext=0``: image_data = fits.getdata(image_file, ext=0) ############################################################################## # The data is now stored as a 2D numpy array. Print the dimensions using the # shape attribute: print(image_data.shape) ############################################################################## # Display the image data: plt.figure() plt.imshow(image_data, cmap='gray') plt.colorbar()
""" ========================================== Create a very large FITS file from scratch ========================================== This example demonstrates how to create a large file (larger than will fit in memory) from scratch using `astropy.io.fits`. *By: Erik Bray* *License: BSD* """ ############################################################################## # Normally to create a single image FITS file one would do something like: import os import numpy as np from astropy.io import fits data = np.zeros((40000, 40000), dtype=np.float64) hdu = fits.PrimaryHDU(data=data) ############################################################################## # Then use the `astropy.io.fits.writeto()` method to write out the new # file to disk hdu.writeto('large.fits') ############################################################################## # However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most # systems won't be able to create that in memory just to write out to disk. In # order to create such a large file efficiently requires a little extra work, # and a few assumptions. # # First, it is helpful to anticipate about how large (as in, how many keywords) # the header will have in it. FITS headers must be written in 2880 byte # blocks, large enough for 36 keywords per block (including the END keyword in # the final block). Typical headers have somewhere between 1 and 4 blocks, # though sometimes more. # # Since the first thing we write to a FITS file is the header, we want to write # enough header blocks so that there is plenty of padding in which to add new # keywords without having to resize the whole file. Say you want the header to # use 4 blocks by default. Then, excluding the END card which Astropy will add # automatically, create the header and pad it out to 36 * 4 cards. # # Create a stub array to initialize the HDU; its # exact size is irrelevant, as long as it has the desired number of # dimensions data = np.zeros((100, 100), dtype=np.float64) hdu = fits.PrimaryHDU(data=data) header = hdu.header while len(header) < (36 * 4 - 1): header.append() # Adds a blank card to the end ############################################################################## # Now adjust the NAXISn keywords to the desired size of the array, and write # only the header out to a file. Using the ``hdu.writeto()`` method will cause # astropy to "helpfully" reset the NAXISn keywords to match the size of the # dummy array. That is because it works hard to ensure that only valid FITS # files are written. Instead, we can write just the header to a file using the # `astropy.io.fits.Header.tofile` method: header['NAXIS1'] = 40000 header['NAXIS2'] = 40000 header.tofile('large.fits') ############################################################################## # Finally, grow out the end of the file to match the length of the # data (plus the length of the header). This can be done very efficiently on # most systems by seeking past the end of the file and writing a single byte, # like so: with open('large.fits', 'rb+') as fobj: # Seek past the length of the header, plus the length of the # Data we want to write. # 8 is the number of bytes per value, i.e. 
abs(header['BITPIX'])/8 # (this example is assuming a 64-bit float) # The -1 is to account for the final byte that we are about to # write: fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1) fobj.write(b'\0') ############################################################################## # More generally, this can be written: shape = tuple(header[f'NAXIS{ii}'] for ii in range(1, header['NAXIS']+1)) with open('large.fits', 'rb+') as fobj: fobj.seek(len(header.tostring()) + (np.product(shape) * np.abs(header['BITPIX']//8)) - 1) fobj.write(b'\0') ############################################################################## # On modern operating systems this will cause the file (past the header) to be # filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On # filesystems that support sparse file creation (most Linux filesystems, but not # the HFS+ filesystem used by most Macs) this is a very fast, efficient # operation. On other systems your mileage may vary. # # This isn't the only way to build up a large file, but probably one of the # safest. This method can also be used to create large multi-extension FITS # files, with a little care. ############################################################################## # Finally, we'll remove the file we created: os.remove('large.fits')
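##############################################################################
# As a quick sanity check on the sizes quoted above (a minimal sketch), we can
# compute how large the grown file is expected to be: the padded header plus
# 40000 x 40000 eight-byte values:

header_bytes = len(header.tostring())  # 4 blocks of 2880 bytes = 11520
data_bytes = 40000 * 40000 * 8         # 12.8 GB of 64-bit floats
print(f"Header: {header_bytes} bytes, data: {data_bytes / 1e9:.1f} GB")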
""" ===================================================================== Accessing data stored as a table in a multi-extension FITS (MEF) file ===================================================================== FITS files can often contain large amount of multi-dimensional data and tables. This example opens a FITS file with information from Chandra's HETG-S instrument. The example uses `astropy.utils.data` to download multi-extension FITS (MEF) file, `astropy.io.fits` to investigate the header, and `astropy.table.Table` to explore the data. *By: Lia Corrales, Adrian Price-Whelan, and Kelle Cruz* *License: BSD* """ ############################################################################## # Use `astropy.utils.data` subpackage to download the FITS file used in this # example. Also import `~astropy.table.Table` from the `astropy.table` subpackage # and `astropy.io.fits` from astropy.io import fits from astropy.table import Table from astropy.utils.data import get_pkg_data_filename ############################################################################## # Download a FITS file event_filename = get_pkg_data_filename('tutorials/FITS-tables/chandra_events.fits') ############################################################################## # Display information about the contents of the FITS file. fits.info(event_filename) ############################################################################## # Extension 1, EVENTS, is a Table that contains information about each X-ray # photon that hit Chandra's HETG-S detector. # # Use `~astropy.table.Table` to read the table events = Table.read(event_filename, hdu=1) ############################################################################## # Print the column names of the Events Table. print(events.columns) ############################################################################## # If a column contains unit information, it will have an associated # `astropy.units` object. print(events['energy'].unit) ############################################################################## # Print the data stored in the Energy column. print(events['energy'])
""" ===================================================== Convert a 3-color image (JPG) to separate FITS images ===================================================== This example opens an RGB JPEG image and writes out each channel as a separate FITS (image) file. This example uses `pillow <https://python-pillow.org>`_ to read the image, `matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files. *By: Erik Bray, Adrian Price-Whelan* *License: BSD* """ import matplotlib.pyplot as plt import numpy as np from PIL import Image from astropy.io import fits from astropy.visualization import astropy_mpl_style ############################################################################## # Set up matplotlib and use a nicer set of plot parameters plt.style.use(astropy_mpl_style) ############################################################################## # Load and display the original 3-color jpeg image: image = Image.open('Hs-2009-14-a-web.jpg') xsize, ysize = image.size print(f"Image size: {ysize} x {xsize}") print(f"Image bands: {image.getbands()}") ax = plt.imshow(image) ############################################################################## # Split the three channels (RGB) and get the data as Numpy arrays. The arrays # are flattened, so they are 1-dimensional: r, g, b = image.split() r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize g_data = np.array(g.getdata()) b_data = np.array(b.getdata()) print(r_data.shape) ############################################################################## # Reshape the image arrays to be 2-dimensional: r_data = r_data.reshape(ysize, xsize) # data is now a matrix (ysize, xsize) g_data = g_data.reshape(ysize, xsize) b_data = b_data.reshape(ysize, xsize) print(r_data.shape) ############################################################################## # Write out the channels as separate FITS images. # Add and visualize header info red = fits.PrimaryHDU(data=r_data) red.header['LATOBS'] = "32:11:56" # add spurious header info red.header['LONGOBS'] = "110:56" red.writeto('red.fits') green = fits.PrimaryHDU(data=g_data) green.header['LATOBS'] = "32:11:56" green.header['LONGOBS'] = "110:56" green.writeto('green.fits') blue = fits.PrimaryHDU(data=b_data) blue.header['LATOBS'] = "32:11:56" blue.header['LONGOBS'] = "110:56" blue.writeto('blue.fits') from pprint import pprint pprint(red.header) ############################################################################## # Delete the files created import os os.remove('red.fits') os.remove('green.fits') os.remove('blue.fits')
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np from numpy.core.multiarray import normalize_axis_index from astropy.stats._fast_sigma_clip import _sigma_clip_fast from astropy.stats.funcs import mad_std from astropy.units import Quantity from astropy.utils import isiterable from astropy.utils.compat.optional_deps import HAS_BOTTLENECK from astropy.utils.exceptions import AstropyUserWarning if HAS_BOTTLENECK: import bottleneck __all__ = ["SigmaClip", "sigma_clip", "sigma_clipped_stats"] def _move_tuple_axes_first(array, axis): """ Bottleneck can only take integer axis, not tuple, so this function takes all the axes to be operated on and combines them into the first dimension of the array so that we can then use axis=0. """ # Figure out how many axes we are operating over naxis = len(axis) # Add remaining axes to the axis tuple axis += tuple(i for i in range(array.ndim) if i not in axis) # The new position of each axis is just in order destination = tuple(range(array.ndim)) # Reorder the array so that the axes being operated on are at the # beginning array_new = np.moveaxis(array, axis, destination) # Collapse the dimensions being operated on into a single dimension # so that we can then use axis=0 with the bottleneck functions array_new = array_new.reshape((-1,) + array_new.shape[naxis:]) return array_new def _nanmean(array, axis=None): """Bottleneck nanmean function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 if isinstance(array, Quantity): return array.__array_wrap__(bottleneck.nanmean(array, axis=axis)) else: return bottleneck.nanmean(array, axis=axis) def _nanmedian(array, axis=None): """Bottleneck nanmedian function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 if isinstance(array, Quantity): return array.__array_wrap__(bottleneck.nanmedian(array, axis=axis)) else: return bottleneck.nanmedian(array, axis=axis) def _nanstd(array, axis=None, ddof=0): """Bottleneck nanstd function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 if isinstance(array, Quantity): return array.__array_wrap__(bottleneck.nanstd(array, axis=axis, ddof=ddof)) else: return bottleneck.nanstd(array, axis=axis, ddof=ddof) def _nanmadstd(array, axis=None): """mad_std function that ignores NaNs by default.""" return mad_std(array, axis=axis, ignore_nan=True) class SigmaClip: """ Class to perform sigma clipping. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < center - (sigma_lower * std) data > center + (sigma_upper * std) where:: center = cenfunc(data [, axis=]) std = stdfunc(data [, axis=]) Invalid data values (i.e., NaN or inf) are automatically clipped. For a functional interface to sigma clipping, see :func:`sigma_clip`. .. note:: `scipy.stats.sigmaclip` provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e., NaN or inf). Also note that it uses the mean as the centering function. 
The equivalent settings to `scipy.stats.sigmaclip` are:: sigclip = SigmaClip(sigma=4., cenfunc='mean', maxiters=None) sigclip(data, axis=None, masked=False, return_bounds=True) Parameters ---------- sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or None, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or None, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or None, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. stdfunc : {'std', 'mad_std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. grow : float or `False`, optional Radius within which to mask the neighbouring pixels of those that fall outwith the clipping limits (only applied along ``axis``, if specified). As an example, for a 2D image a value of 1 will mask the nearest pixels in a cross pattern around each deviant pixel, while 1.5 will also reject the nearest diagonal neighbours and so on. See Also -------- sigma_clip, sigma_clipped_stats Notes ----- The best performance will typically be obtained by setting ``cenfunc`` and ``stdfunc`` to one of the built-in functions specified as as string. If one of the options is set to a string while the other has a custom callable, you may in some cases see better performance if you have the `bottleneck`_ package installed. .. _bottleneck: https://github.com/pydata/bottleneck Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median. 
The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import SigmaClip >>> from numpy.random import randn >>> randvar = randn(10000) >>> sigclip = SigmaClip(sigma=2, maxiters=5) >>> filtered_data = sigclip(randvar) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and modifies the data in-place:: >>> from astropy.stats import SigmaClip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean') >>> filtered_data = sigclip(randvar, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import SigmaClip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> sigclip = SigmaClip(sigma=2.3) >>> filtered_data = sigclip(data, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher. """ def __init__( self, sigma=3.0, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc="median", stdfunc="std", grow=False, ): self.sigma = sigma self.sigma_lower = sigma_lower or sigma self.sigma_upper = sigma_upper or sigma self.maxiters = maxiters or np.inf self.cenfunc = cenfunc self.stdfunc = stdfunc self._cenfunc_parsed = self._parse_cenfunc(cenfunc) self._stdfunc_parsed = self._parse_stdfunc(stdfunc) self._min_value = np.nan self._max_value = np.nan self._niterations = 0 self.grow = grow # This just checks that SciPy is available, to avoid failing # later than necessary if __call__ needs it: if self.grow: from scipy.ndimage import binary_dilation self._binary_dilation = binary_dilation def __repr__(self): return ( f"SigmaClip(sigma={self.sigma}, sigma_lower={self.sigma_lower}," f" sigma_upper={self.sigma_upper}, maxiters={self.maxiters}," f" cenfunc={self.cenfunc!r}, stdfunc={self.stdfunc!r}, grow={self.grow})" ) def __str__(self): lines = ["<" + self.__class__.__name__ + ">"] attrs = [ "sigma", "sigma_lower", "sigma_upper", "maxiters", "cenfunc", "stdfunc", "grow", ] for attr in attrs: lines.append(f" {attr}: {repr(getattr(self, attr))}") return "\n".join(lines) @staticmethod def _parse_cenfunc(cenfunc): if isinstance(cenfunc, str): if cenfunc == "median": if HAS_BOTTLENECK: cenfunc = _nanmedian else: cenfunc = np.nanmedian # pragma: no cover elif cenfunc == "mean": if HAS_BOTTLENECK: cenfunc = _nanmean else: cenfunc = np.nanmean # pragma: no cover else: raise ValueError(f"{cenfunc} is an invalid cenfunc.") return cenfunc @staticmethod def _parse_stdfunc(stdfunc): if isinstance(stdfunc, str): if stdfunc == "std": if HAS_BOTTLENECK: stdfunc = _nanstd else: stdfunc = np.nanstd # pragma: no cover elif stdfunc == "mad_std": stdfunc = _nanmadstd else: raise ValueError(f"{stdfunc} is an invalid stdfunc.") return stdfunc def _compute_bounds(self, data, axis=None): # ignore RuntimeWarning if the array (or along an axis) has only # NaNs with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) self._max_value = self._cenfunc_parsed(data, axis=axis) std = self._stdfunc_parsed(data, axis=axis) self._min_value = self._max_value - (std * self.sigma_lower) self._max_value += std * self.sigma_upper def _sigmaclip_fast( self, data, axis=None, masked=True, return_bounds=False, copy=True ): """ Fast C implementation for simple use cases. 
""" if isinstance(data, Quantity): data, unit = data.value, data.unit else: unit = None if copy is False and masked is False and data.dtype.kind != "f": raise Exception( "cannot mask non-floating-point array with NaN " "values, set copy=True or masked=True to avoid " "this." ) if axis is None: axis = -1 if data.ndim == 1 else tuple(range(data.ndim)) if not isiterable(axis): axis = normalize_axis_index(axis, data.ndim) data_reshaped = data transposed_shape = None else: # The gufunc implementation does not handle non-scalar axis # so we combine the dimensions together as the last # dimension and set axis=-1 axis = tuple(normalize_axis_index(ax, data.ndim) for ax in axis) transposed_axes = ( tuple(ax for ax in range(data.ndim) if ax not in axis) + axis ) data_transposed = data.transpose(transposed_axes) transposed_shape = data_transposed.shape data_reshaped = data_transposed.reshape( transposed_shape[: data.ndim - len(axis)] + (-1,) ) axis = -1 if data_reshaped.dtype.kind != "f" or data_reshaped.dtype.itemsize > 8: data_reshaped = data_reshaped.astype(float) mask = ~np.isfinite(data_reshaped) if np.any(mask): warnings.warn( "Input data contains invalid values (NaNs or " "infs), which were automatically clipped.", AstropyUserWarning, ) if isinstance(data_reshaped, np.ma.MaskedArray): mask |= data_reshaped.mask data = data.view(np.ndarray) data_reshaped = data_reshaped.view(np.ndarray) mask = np.broadcast_to(mask, data_reshaped.shape).copy() bound_lo, bound_hi = _sigma_clip_fast( data_reshaped, mask, self.cenfunc == "median", self.stdfunc == "mad_std", -1 if np.isinf(self.maxiters) else self.maxiters, self.sigma_lower, self.sigma_upper, axis=axis, ) with np.errstate(invalid="ignore"): mask |= data_reshaped < np.expand_dims(bound_lo, axis) mask |= data_reshaped > np.expand_dims(bound_hi, axis) if transposed_shape is not None: # Get mask in shape of data. mask = mask.reshape(transposed_shape) mask = mask.transpose( tuple(transposed_axes.index(ax) for ax in range(data.ndim)) ) if masked: result = np.ma.array(data, mask=mask, copy=copy) else: if copy: result = data.astype(float, copy=True) else: result = data result[mask] = np.nan if unit is not None: result = result << unit bound_lo = bound_lo << unit bound_hi = bound_hi << unit if return_bounds: return result, bound_lo, bound_hi else: return result def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False, copy=True): """ Sigma clip when ``axis`` is None and ``grow`` is not >0. In this simple case, we remove clipped elements from the flattened array during each iteration. 
""" filtered_data = data.ravel() # remove masked values and convert to ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = filtered_data.data[~filtered_data.mask] # remove invalid values good_mask = np.isfinite(filtered_data) if np.any(~good_mask): filtered_data = filtered_data[good_mask] warnings.warn( "Input data contains invalid values (NaNs or " "infs), which were automatically clipped.", AstropyUserWarning, ) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 size = filtered_data.size self._compute_bounds(filtered_data, axis=None) filtered_data = filtered_data[ (filtered_data >= self._min_value) & (filtered_data <= self._max_value) ] nchanged = size - filtered_data.size self._niterations = iteration if masked: # return a masked array and optional bounds filtered_data = np.ma.masked_invalid(data, copy=copy) # update the mask in place, ignoring RuntimeWarnings for # comparisons with NaN data values with np.errstate(invalid="ignore"): filtered_data.mask |= np.logical_or( data < self._min_value, data > self._max_value ) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data def _sigmaclip_withaxis( self, data, axis=None, masked=True, return_bounds=False, copy=True ): """ Sigma clip the data when ``axis`` or ``grow`` is specified. In this case, we replace clipped values with NaNs as placeholder values. """ # float array type is needed to insert nans into the array filtered_data = data.astype(float) # also makes a copy # remove invalid values bad_mask = ~np.isfinite(filtered_data) if np.any(bad_mask): filtered_data[bad_mask] = np.nan warnings.warn( "Input data contains invalid values (NaNs or " "infs), which were automatically clipped.", AstropyUserWarning, ) # remove masked values and convert to plain ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = np.ma.masked_invalid(filtered_data).astype(float) filtered_data = filtered_data.filled(np.nan) if axis is not None: # convert negative axis/axes if not isiterable(axis): axis = (axis,) axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis) # define the shape of min/max arrays so that they can be broadcast # with the data mshape = tuple( 1 if dim in axis else size for dim, size in enumerate(filtered_data.shape) ) if self.grow: # Construct a growth kernel from the specified radius in # pixels (consider caching this for re-use by subsequent # calls?): cenidx = int(self.grow) size = 2 * cenidx + 1 indices = np.mgrid[(slice(0, size),) * data.ndim] if axis is not None: for n, dim in enumerate(indices): # For any axes that we're not clipping over, set # their indices outside the growth radius, so masked # points won't "grow" in that dimension: if n not in axis: dim[dim != cenidx] = size kernel = sum((idx - cenidx) ** 2 for idx in indices) <= self.grow**2 del indices nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 self._compute_bounds(filtered_data, axis=axis) if not np.isscalar(self._min_value): self._min_value = self._min_value.reshape(mshape) self._max_value = self._max_value.reshape(mshape) with np.errstate(invalid="ignore"): # Since these comparisons are always False for NaNs, the # resulting mask contains only newly-rejected pixels and # we can dilate it without growing masked pixels more # than once. 
new_mask = (filtered_data < self._min_value) | ( filtered_data > self._max_value ) if self.grow: new_mask = self._binary_dilation(new_mask, kernel) filtered_data[new_mask] = np.nan nchanged = np.count_nonzero(new_mask) del new_mask self._niterations = iteration if masked: # create an output masked array if copy: filtered_data = np.ma.MaskedArray( data, ~np.isfinite(filtered_data), copy=True ) else: # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid="ignore"): out = np.ma.masked_invalid(data, copy=False) filtered_data = np.ma.masked_where( np.logical_or(out < self._min_value, out > self._max_value), out, copy=False, ) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data def __call__(self, data, axis=None, masked=True, return_bounds=False, copy=True): """ Perform sigma clipping on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. axis : None or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` is returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and ``masked=False``, the input data is modified in-place. The default is `True`. Returns ------- result : array-like If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values and where the input mask was `True`. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the masked array or array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If the input ``data`` was a masked array, then the output `~numpy.ndarray` will also contain ``np.nan`` where the input mask was `True`. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be be `~numpy.ndarray`\\s. """ data = np.asanyarray(data) if data.size == 0: if masked: result = np.ma.MaskedArray(data) else: result = data if return_bounds: return result, self._min_value, self._max_value else: return result if isinstance(data, np.ma.MaskedArray) and data.mask.all(): if masked: result = data else: result = np.full(data.shape, np.nan) if return_bounds: return result, self._min_value, self._max_value else: return result # Shortcut for common cases where a fast C implementation can be # used. 
if ( self.cenfunc in ("mean", "median") and self.stdfunc in ("std", "mad_std") and axis is not None and not self.grow ): return self._sigmaclip_fast( data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy ) # These two cases are treated separately because when # ``axis=None`` we can simply remove clipped values from the # array. This is not possible when ``axis`` or ``grow`` is # specified. if axis is None and not self.grow: return self._sigmaclip_noaxis( data, masked=masked, return_bounds=return_bounds, copy=copy ) else: return self._sigmaclip_withaxis( data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy ) def sigma_clip( data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc="median", stdfunc="std", axis=None, masked=True, return_bounds=False, copy=True, grow=False, ): """ Perform sigma-clipping on the provided data. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < center - (sigma_lower * std) data > center + (sigma_upper * std) where:: center = cenfunc(data [, axis=]) std = stdfunc(data [, axis=]) Invalid data values (i.e., NaN or inf) are automatically clipped. For an object-oriented interface to sigma clipping, see :class:`SigmaClip`. .. note:: `scipy.stats.sigmaclip` provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e., NaN or inf). Also note that it uses the mean as the centering function. The equivalent settings to `scipy.stats.sigmaclip` are:: sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None, ... masked=False, return_bounds=True) Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or None, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or None, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or None, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. stdfunc : {'std', 'mad_std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. 
The default is ``'std'``. axis : None or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` is returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and ``masked=False``, the input data is modified in-place. The default is `True`. grow : float or `False`, optional Radius within which to mask the neighbouring pixels of those that fall outwith the clipping limits (only applied along ``axis``, if specified). As an example, for a 2D image a value of 1 will mask the nearest pixels in a cross pattern around each deviant pixel, while 1.5 will also reject the nearest diagonal neighbours and so on. Returns ------- result : array-like If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values and where the input mask was `True`. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the masked array or array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If the input ``data`` was a masked array, then the output `~numpy.ndarray` will also contain ``np.nan`` where the input mask was `True`. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be `~numpy.ndarray`\\s. See Also -------- SigmaClip, sigma_clipped_stats Notes ----- The best performance will typically be obtained by setting ``cenfunc`` and ``stdfunc`` to one of the built-in functions specified as a string. If one of the options is set to a string while the other has a custom callable, you may in some cases see better performance if you have the `bottleneck`_ package installed. .. _bottleneck: https://github.com/pydata/bottleneck Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median.
The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and does not copy the data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, ... cenfunc=mean, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import sigma_clip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher. """ sigclip = SigmaClip( sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc, grow=grow, ) return sigclip( data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy ) def sigma_clipped_stats( data, mask=None, mask_value=None, sigma=3.0, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc="median", stdfunc="std", std_ddof=0, axis=None, grow=False, ): """ Calculate sigma-clipped statistics on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` Data array or object that can be converted to an array. mask : `numpy.ndarray` (bool), optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are excluded when computing the statistics. mask_value : float, optional A data value (e.g., ``0.0``) that is ignored when computing the statistics. ``mask_value`` will be masked in addition to any input ``mask``. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or None, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or None, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or None, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. stdfunc : {'std', 'mad_std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. 
If using a callable function/object and the ``axis`` keyword is used, then it must be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. std_ddof : int, optional The delta degrees of freedom for the standard deviation calculation. The divisor used in the calculation is ``N - std_ddof``, where ``N`` represents the number of elements. The default is 0. axis : None or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. grow : float or `False`, optional Radius within which to mask the neighbouring pixels of those that fall outwith the clipping limits (only applied along ``axis``, if specified). As an example, for a 2D image a value of 1 will mask the nearest pixels in a cross pattern around each deviant pixel, while 1.5 will also reject the nearest diagonal neighbours and so on. Notes ----- The best performance will typically be obtained by setting ``cenfunc`` and ``stdfunc`` to one of the built-in functions specified as a string. If one of the options is set to a string while the other has a custom callable, you may in some cases see better performance if you have the `bottleneck`_ package installed. .. _bottleneck: https://github.com/pydata/bottleneck Returns ------- mean, median, stddev : float The mean, median, and standard deviation of the sigma-clipped data. See Also -------- SigmaClip, sigma_clip """ if mask is not None: data = np.ma.MaskedArray(data, mask) if mask_value is not None: data = np.ma.masked_values(data, mask_value) if isinstance(data, np.ma.MaskedArray) and data.mask.all(): return np.ma.masked, np.ma.masked, np.ma.masked sigclip = SigmaClip( sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc, grow=grow, ) data_clipped = sigclip( data, axis=axis, masked=False, return_bounds=False, copy=True ) if HAS_BOTTLENECK: mean = _nanmean(data_clipped, axis=axis) median = _nanmedian(data_clipped, axis=axis) std = _nanstd(data_clipped, ddof=std_ddof, axis=axis) else: # pragma: no cover mean = np.nanmean(data_clipped, axis=axis) median = np.nanmedian(data_clipped, axis=axis) std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis) return mean, median, std
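# Minimal usage sketch of the two public entry points above (illustrative
# only; it assumes numpy and an installed astropy, and the specific numbers
# are arbitrary): clip a dataset containing injected outliers and compare the
# object-oriented interface with the convenience statistics function.
if __name__ == "__main__":
    import numpy as np

    from astropy.stats import SigmaClip, sigma_clipped_stats

    rng = np.random.default_rng(42)
    values = rng.normal(loc=5.0, scale=1.0, size=1000)
    values[:10] = 100.0  # inject strong outliers

    # Object-oriented interface: clipped values are masked in the result.
    sigclip = SigmaClip(sigma=3, maxiters=5, cenfunc="median", stdfunc="std")
    clipped = sigclip(values)
    print("clipped points:", clipped.mask.sum())

    # Convenience function: mean, median and std of the sigma-clipped data.
    mean, median, std = sigma_clipped_stats(values, sigma=3, maxiters=5)
    print(mean, median, std)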
c151da4fed9add2294bd9c44325c6b41f8577bc51dee25f9563be316d4a1d98e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Bayesian Blocks for Time Series Analysis ======================================== Dynamic programming algorithm for solving a piecewise-constant model for various datasets. This is based on the algorithm presented in Scargle et al 2013 [1]_. This code was ported from the astroML project [2]_. Applications include: - finding an optimal histogram with adaptive bin widths - finding optimal segmentation of time series data - detecting inflection points in the rate of event data The primary interface to these routines is the :func:`bayesian_blocks` function. This module provides fitness functions suitable for three types of data: - Irregularly-spaced event data via the :class:`Events` class - Regularly-spaced event data via the :class:`RegularEvents` class - Irregularly-spaced point measurements via the :class:`PointMeasures` class For more fine-tuned control over the fitness functions used, it is possible to define custom :class:`FitnessFunc` classes directly and use them with the :func:`bayesian_blocks` routine. One common application of the Bayesian Blocks algorithm is the determination of optimal adaptive-width histogram bins. This uses the same fitness function as for irregularly-spaced time series events. The easiest interface for creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram` function. References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S .. [2] https://www.astroml.org/ https://github.com//astroML/astroML/ .. [3] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic Programming. Princeton University Press, Princeton. https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming .. [4] Bellman, R., Roth, R., 1969. Curve fitting by segmented straight lines. J. Amer. Statist. Assoc. 64, 1079–1084. https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038 """ import warnings from inspect import signature import numpy as np from astropy.utils.exceptions import AstropyUserWarning # TODO: implement other fitness functions from appendix C of Scargle 2013 __all__ = ["FitnessFunc", "Events", "RegularEvents", "PointMeasures", "bayesian_blocks"] def bayesian_blocks(t, x=None, sigma=None, fitness="events", **kwargs): r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks This is a flexible implementation of the Bayesian Blocks algorithm described in Scargle 2013 [1]_. Parameters ---------- t : array-like data times (one dimensional, length N) x : array-like, optional data values sigma : array-like or float, optional data errors fitness : str or object the fitness function to use for the model. If a string, the following options are supported: - 'events' : binned or unbinned event data. Arguments are ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. - 'regular_events' : non-overlapping events measured at multiples of a fundamental tick rate, ``dt``, which must be specified as an additional argument. Extra arguments are ``p0``, which gives the false alarm probability to compute the prior, or ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. - 'measures' : fitness for a measured sequence with Gaussian errors. 
Extra arguments are ``p0``, which gives the false alarm probability to compute the prior, or ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. In all three cases, if more than one of ``p0``, ``gamma``, and ``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma`` which takes precedence over ``p0``. Alternatively, the fitness parameter can be an instance of :class:`FitnessFunc` or a subclass thereof. **kwargs : any additional keyword arguments will be passed to the specified :class:`FitnessFunc` derived class. Returns ------- edges : ndarray array containing the (N+1) edges defining the N bins Examples -------- .. testsetup:: >>> np.random.seed(12345) Event data: >>> t = np.random.normal(size=100) >>> edges = bayesian_blocks(t, fitness='events', p0=0.01) Event data with repeats: >>> t = np.random.normal(size=100) >>> t[80:] = t[:20] >>> edges = bayesian_blocks(t, fitness='events', p0=0.01) Regular event data: >>> dt = 0.05 >>> t = dt * np.arange(1000) >>> x = np.zeros(len(t)) >>> x[np.random.randint(0, len(t), len(t) // 10)] = 1 >>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt) Measured point data with errors: >>> t = 100 * np.random.random(100) >>> x = np.exp(-0.5 * (t - 50) ** 2) >>> sigma = 0.1 >>> x_obs = np.random.normal(x, sigma) >>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures') References ---------- .. [1] Scargle, J et al. (2013) https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S .. [2] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic Programming. Princeton University Press, Princeton. https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming .. [3] Bellman, R., Roth, R., 1969. Curve fitting by segmented straight lines. J. Amer. Statist. Assoc. 64, 1079–1084. https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038 See Also -------- astropy.stats.histogram : compute a histogram using bayesian blocks """ FITNESS_DICT = { "events": Events, "regular_events": RegularEvents, "measures": PointMeasures, } fitness = FITNESS_DICT.get(fitness, fitness) if type(fitness) is type and issubclass(fitness, FitnessFunc): fitfunc = fitness(**kwargs) elif isinstance(fitness, FitnessFunc): fitfunc = fitness else: raise ValueError("fitness parameter not understood") return fitfunc.fit(t, x, sigma) class FitnessFunc: """Base class for bayesian blocks fitness functions Derived classes should overload the following method: ``fitness(self, **kwargs)``: Compute the fitness given a set of named arguments. Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]`` (See [1]_ for details on the meaning of these parameters). Additionally, other methods may be overloaded as well: ``__init__(self, **kwargs)``: Initialize the fitness function with any parameters beyond the normal ``p0`` and ``gamma``. ``validate_input(self, t, x, sigma)``: Enable specific checks of the input data (``t``, ``x``, ``sigma``) to be performed prior to the fit. ``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly, this function is called in order to define it before fitting. This may be calculated from ``gamma``, ``p0``, or whatever method you choose. ``p0_prior(self, N)``: Specify the form of the prior given the false-alarm probability ``p0`` (See [1]_ for details). For examples of implemented fitness functions, see :class:`Events`, :class:`RegularEvents`, and :class:`PointMeasures`. References ---------- .. [1] Scargle, J et al. 
(2013) https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S """ def __init__(self, p0=0.05, gamma=None, ncp_prior=None): self.p0 = p0 self.gamma = gamma self.ncp_prior = ncp_prior def validate_input(self, t, x=None, sigma=None): """Validate inputs to the model. Parameters ---------- t : array-like times of observations x : array-like, optional values observed at each time sigma : float or array-like, optional errors in values x Returns ------- t, x, sigma : array-like, float or None validated and perhaps modified versions of inputs """ # validate array input t = np.asarray(t, dtype=float) # find unique values of t t = np.array(t) if t.ndim != 1: raise ValueError("t must be a one-dimensional array") unq_t, unq_ind, unq_inv = np.unique(t, return_index=True, return_inverse=True) # if x is not specified, x will be counts at each time if x is None: if sigma is not None: raise ValueError("If sigma is specified, x must be specified") else: sigma = 1 if len(unq_t) == len(t): x = np.ones_like(t) else: x = np.bincount(unq_inv) t = unq_t # if x is specified, then we need to simultaneously sort t and x else: # TODO: allow broadcasted x? x = np.asarray(x, dtype=float) if x.shape not in [(), (1,), (t.size,)]: raise ValueError("x does not match shape of t") x += np.zeros_like(t) if len(unq_t) != len(t): raise ValueError( "Repeated values in t not supported when x is specified" ) t = unq_t x = x[unq_ind] # verify the given sigma value if sigma is None: sigma = 1 else: sigma = np.asarray(sigma, dtype=float) if sigma.shape not in [(), (1,), (t.size,)]: raise ValueError("sigma does not match the shape of x") return t, x, sigma def fitness(self, **kwargs): raise NotImplementedError() def p0_prior(self, N): """ Empirical prior, parametrized by the false alarm probability ``p0`` See eq. 21 in Scargle (2013) Note that there was an error in this equation in the original Scargle paper (the "log" was missing). The following corrected form is taken from https://arxiv.org/abs/1304.2818 """ return 4 - np.log(73.53 * self.p0 * (N**-0.478)) # the fitness_args property will return the list of arguments accepted by # the method fitness(). This allows more efficient computation below. @property def _fitness_args(self): return signature(self.fitness).parameters.keys() def compute_ncp_prior(self, N): """ If ``ncp_prior`` is not explicitly defined, compute it from ``gamma`` or ``p0``. """ if self.gamma is not None: return -np.log(self.gamma) elif self.p0 is not None: return self.p0_prior(N) else: raise ValueError( "``ncp_prior`` cannot be computed as neither " "``gamma`` nor ``p0`` is defined." ) def fit(self, t, x=None, sigma=None): """Fit the Bayesian Blocks model given the specified fitness function. 
Parameters ---------- t : array-like data times (one dimensional, length N) x : array-like, optional data values sigma : array-like or float, optional data errors Returns ------- edges : ndarray array containing the (M+1) edges defining the M optimal bins """ t, x, sigma = self.validate_input(t, x, sigma) # compute values needed for computation, below if "a_k" in self._fitness_args: ak_raw = np.ones_like(x) / sigma**2 if "b_k" in self._fitness_args: bk_raw = x / sigma**2 if "c_k" in self._fitness_args: ck_raw = x * x / sigma**2 # create length-(N + 1) array of cell edges edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]]) block_length = t[-1] - edges # arrays to store the best configuration N = len(t) best = np.zeros(N, dtype=float) last = np.zeros(N, dtype=int) # Compute ncp_prior if not defined if self.ncp_prior is None: ncp_prior = self.compute_ncp_prior(N) else: ncp_prior = self.ncp_prior # ---------------------------------------------------------------- # Start with first data cell; add one cell at each iteration # ---------------------------------------------------------------- for R in range(N): # Compute fit_vec : fitness of putative last block (end at R) kwds = {} # T_k: width/duration of each block if "T_k" in self._fitness_args: kwds["T_k"] = block_length[: (R + 1)] - block_length[R + 1] # N_k: number of elements in each block if "N_k" in self._fitness_args: kwds["N_k"] = np.cumsum(x[: (R + 1)][::-1])[::-1] # a_k: eq. 31 if "a_k" in self._fitness_args: kwds["a_k"] = 0.5 * np.cumsum(ak_raw[: (R + 1)][::-1])[::-1] # b_k: eq. 32 if "b_k" in self._fitness_args: kwds["b_k"] = -np.cumsum(bk_raw[: (R + 1)][::-1])[::-1] # c_k: eq. 33 if "c_k" in self._fitness_args: kwds["c_k"] = 0.5 * np.cumsum(ck_raw[: (R + 1)][::-1])[::-1] # evaluate fitness function fit_vec = self.fitness(**kwds) A_R = fit_vec - ncp_prior A_R[1:] += best[:R] i_max = np.argmax(A_R) last[R] = i_max best[R] = A_R[i_max] # ---------------------------------------------------------------- # Now find changepoints by iteratively peeling off the last block # ---------------------------------------------------------------- change_points = np.zeros(N, dtype=int) i_cp = N ind = N while i_cp > 0: i_cp -= 1 change_points[i_cp] = ind if ind == 0: break ind = last[ind - 1] if i_cp == 0: change_points[i_cp] = 0 change_points = change_points[i_cp:] return edges[change_points] class Events(FitnessFunc): r"""Bayesian blocks fitness for binned or unbinned events Parameters ---------- p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). For the Events type data, ``p0`` does not seem to be an accurate representation of the actual false alarm probability. If you are using this fitness function for a triggering type condition, it is recommended that you run statistical trials on signal-free noise to determine an appropriate value of ``gamma`` or ``ncp_prior`` to use for a desired false alarm rate. gamma : float, optional If specified, then use this gamma to compute the general prior form, :math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` is ignored. """ def fitness(self, N_k, T_k): # eq. 
19 from Scargle 2013 return N_k * (np.log(N_k / T_k)) def validate_input(self, t, x, sigma): t, x, sigma = super().validate_input(t, x, sigma) if x is not None and np.any(x % 1 > 0): raise ValueError("x must be integer counts for fitness='events'") return t, x, sigma class RegularEvents(FitnessFunc): r"""Bayesian blocks fitness for regular events This is for data which has a fundamental "tick" length, so that all measured values are multiples of this tick length. In each tick, there are either zero or one counts. Parameters ---------- dt : float tick rate for data p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored. """ def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None): self.dt = dt super().__init__(p0, gamma, ncp_prior) def validate_input(self, t, x, sigma): t, x, sigma = super().validate_input(t, x, sigma) if not np.all((x == 0) | (x == 1)): raise ValueError("Regular events must have only 0 and 1 in x") return t, x, sigma def fitness(self, T_k, N_k): # Eq. C23 of Scargle 2013 M_k = T_k / self.dt N_over_M = N_k / M_k eps = 1e-8 if np.any(N_over_M > 1 + eps): warnings.warn( "regular events: N/M > 1. Is the time step correct?", AstropyUserWarning, ) one_m_NM = 1 - N_over_M N_over_M[N_over_M <= 0] = 1 one_m_NM[one_m_NM <= 0] = 1 return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM) class PointMeasures(FitnessFunc): r"""Bayesian blocks fitness for point measures Parameters ---------- p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored. """ def __init__(self, p0=0.05, gamma=None, ncp_prior=None): super().__init__(p0, gamma, ncp_prior) def fitness(self, a_k, b_k): # eq. 41 from Scargle 2013 return (b_k * b_k) / (4 * a_k) def validate_input(self, t, x, sigma): if x is None: raise ValueError("x must be specified for point measures") return super().validate_input(t, x, sigma)
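# Minimal usage sketch (illustrative only; it assumes numpy and an installed
# astropy, and the toy dataset is arbitrary): recover adaptive-width histogram
# edges for event data drawn from two well-separated Gaussians using the
# 'events' fitness with a 1% false-alarm probability.
if __name__ == "__main__":
    import numpy as np

    from astropy.stats import bayesian_blocks

    rng = np.random.default_rng(0)
    t = np.concatenate([rng.normal(-2.0, 0.3, 500), rng.normal(3.0, 1.0, 500)])

    edges = bayesian_blocks(t, fitness="events", p0=0.01)
    print("number of blocks:", len(edges) - 1)
    print("edges:", edges)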
57229797b75922ac534dba62e95731ad639aa670e7674ba99f70a0840a4031fc
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage contains statistical tools provided for or used by Astropy. While the `scipy.stats` package contains a wide range of statistical tools, it is a general-purpose package, and is missing some that are particularly useful to astronomy or are used in an atypical way in astronomy. This package is intended to provide such functionality, but *not* to replace `scipy.stats` if its implementation satisfies astronomers' needs. """ from . import bayesian_blocks as _bb from . import biweight, circstats, funcs from . import histogram as _hist from . import info_theory, jackknife, sigma_clipping, spatial from .bayesian_blocks import * from .biweight import * from .bls import * from .circstats import * from .funcs import * from .histogram import * from .info_theory import * from .jackknife import * from .lombscargle import * from .sigma_clipping import * from .spatial import * # Build ``__all__`` explicitly so that deprecated modules pulled in by the # star imports above are not re-exported from this subpackage. __all__ = [] __all__.extend(funcs.__all__) __all__.extend(biweight.__all__) __all__.extend(sigma_clipping.__all__) __all__.extend(jackknife.__all__) __all__.extend(circstats.__all__) __all__.extend(_bb.__all__) __all__.extend(_hist.__all__) __all__.extend(info_theory.__all__) __all__.extend(spatial.__all__)
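# In practice this means user code can rely on the flat namespace, e.g.
# ``from astropy.stats import sigma_clip, mad_std``, rather than importing
# the individual submodules listed above.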
dc167e1a0881902612fe938890b721dc0ba5be80f44287c80b8809d7ac87536e
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import numpy from setuptools import Extension SRCDIR = os.path.join(os.path.relpath(os.path.dirname(__file__)), "src") SRCFILES = ["wirth_select.c", "compute_bounds.c", "fast_sigma_clip.c"] SRCFILES = [os.path.join(SRCDIR, srcfile) for srcfile in SRCFILES] def get_extensions(): _sigma_clip_ext = Extension( name="astropy.stats._fast_sigma_clip", sources=SRCFILES, include_dirs=[numpy.get_include()], language="c", ) return [_sigma_clip_ext]
8735381a662faba6c6b78abc164b291e81beef627e0a94a20c7dc9090731fd98
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple functions for dealing with circular statistics, for instance, mean, variance, standard deviation, correlation coefficient, and so on. This module also covers tests of uniformity, e.g., the Rayleigh and V tests. The Maximum Likelihood Estimator for the Von Mises distribution along with the Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations are based on reference [1]_, which is also the basis for the R package 'CircStats' [2]_. """ import numpy as np from astropy.units import Quantity __all__ = [ "circmean", "circstd", "circvar", "circmoment", "circcorrcoef", "rayleightest", "vtest", "vonmisesmle", ] __doctest_requires__ = {"vtest": ["scipy"]} def _components(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized rectangular components # of the circular data. if weights is None: weights = np.ones((1,)) try: weights = np.broadcast_to(weights, data.shape) except ValueError: raise ValueError("Weights and data have inconsistent shape.") C = np.sum(weights * np.cos(p * (data - phi)), axis) / np.sum(weights, axis) S = np.sum(weights * np.sin(p * (data - phi)), axis) / np.sum(weights, axis) return C, S def _angle(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized sample mean angle C, S = _components(data, p, phi, axis, weights) # theta will be an angle in the interval [-np.pi, np.pi) # [-180, 180)*u.deg in case data is a Quantity theta = np.arctan2(S, C) if isinstance(data, Quantity): theta = theta.to(data.unit) return theta def _length(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized sample length C, S = _components(data, p, phi, axis, weights) return np.hypot(S, C) def circmean(data, axis=None, weights=None): """Computes the circular mean angle of an array of circular data. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which circular means are computed. The default is to compute the mean of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- circmean : ndarray or `~astropy.units.Quantity` Circular mean. Examples -------- >>> import numpy as np >>> from astropy.stats import circmean >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circmean(data) # doctest: +FLOAT_CMP <Quantity 48.62718088722989 deg> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ return _angle(data, 1, 0.0, axis, weights) def circvar(data, axis=None, weights=None): """Computes the circular variance of an array of circular data. There are some concepts for defining measures of dispersion for circular data. The variance implemented here is based on the definition given by [1]_, which is also the one used by the R package 'CircStats' [2]_.
Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. Dimensionless, if Quantity. axis : int, optional Axis along which circular variances are computed. The default is to compute the variance of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- circvar : ndarray or `~astropy.units.Quantity` ['dimensionless'] Circular variance. Examples -------- >>> import numpy as np >>> from astropy.stats import circvar >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circvar(data) # doctest: +FLOAT_CMP <Quantity 0.16356352748437508> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> Notes ----- The definition used here differs from the one in scipy.stats.circvar. Precisely, Scipy circvar uses an approximation based on the limit of small angles which approaches the linear variance. """ return 1.0 - _length(data, 1, 0.0, axis, weights) def circstd(data, axis=None, weights=None, method="angular"): """Computes the circular standard deviation of an array of circular data. The standard deviation implemented here is based on the definitions given by [1]_, which is also the same used by the R package 'CirStat' [2]_. Two methods are implemented: 'angular' and 'circular'. The former is defined as sqrt(2 * (1 - R)) and it is bounded in [0, 2*Pi]. The latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf]. Following 'CircStat' the default method used to obtain the standard deviation is 'angular'. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. If quantity, must be dimensionless. axis : int, optional Axis along which circular variances are computed. The default is to compute the variance of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [3]_, remark 1.4, page 22, for detailed explanation. method : str, optional The method used to estimate the standard deviation: - 'angular' : obtains the angular deviation - 'circular' : obtains the circular deviation Returns ------- circstd : ndarray or `~astropy.units.Quantity` ['dimensionless'] Angular or circular standard deviation. Examples -------- >>> import numpy as np >>> from astropy.stats import circstd >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circstd(data) # doctest: +FLOAT_CMP <Quantity 0.57195022> Alternatively, using the 'circular' method: >>> import numpy as np >>> from astropy.stats import circstd >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circstd(data, method='circular') # doctest: +FLOAT_CMP <Quantity 0.59766999> References ---------- .. [1] P. Berens. 
"CircStat: A MATLAB Toolbox for Circular Statistics". Journal of Statistical Software, vol 31, issue 10, 2009. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> .. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. """ if method not in ("angular", "circular"): raise ValueError("method should be either 'angular' or 'circular'") if method == "angular": return np.sqrt(2.0 * (1.0 - _length(data, 1, 0.0, axis, weights))) else: return np.sqrt(-2.0 * np.log(_length(data, 1, 0.0, axis, weights))) def circmoment(data, p=1.0, centered=False, axis=None, weights=None): """Computes the ``p``-th trigonometric circular moment for an array of circular data. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. p : float, optional Order of the circular moment. centered : bool, optional If ``True``, central circular moments are computed. Default value is ``False``. axis : int, optional Axis along which circular moments are computed. The default is to compute the circular moment of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- circmoment : ndarray or `~astropy.units.Quantity` The first and second elements correspond to the direction and length of the ``p``-th circular moment, respectively. Examples -------- >>> import numpy as np >>> from astropy.stats import circmoment >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circmoment(data, p=2) # doctest: +FLOAT_CMP (<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>) References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ if centered: phi = circmean(data, axis, weights) else: phi = 0.0 return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights) def circcorrcoef(alpha, beta, axis=None, weights_alpha=None, weights_beta=None): """Computes the circular correlation coefficient between two array of circular data. Parameters ---------- alpha : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. beta : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which circular correlation coefficients are computed. The default is the compute the circular correlation coefficient of the flattened array. weights_alpha : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights_alpha`` represents a weighting factor for each group such that ``sum(weights_alpha, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. weights_beta : numpy.ndarray, optional See description of ``weights_alpha``. 
Returns ------- rho : ndarray or `~astropy.units.Quantity` ['dimensionless'] Circular correlation coefficient. Examples -------- >>> import numpy as np >>> from astropy.stats import circcorrcoef >>> from astropy import units as u >>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302, ... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122, ... 329])*u.deg >>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, ... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg >>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP <Quantity 0.2704648826748831> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ if np.size(alpha, axis) != np.size(beta, axis): raise ValueError("alpha and beta must be arrays of the same size") mu_a = circmean(alpha, axis, weights_alpha) mu_b = circmean(beta, axis, weights_beta) sin_a = np.sin(alpha - mu_a) sin_b = np.sin(beta - mu_b) rho = np.sum(sin_a * sin_b) / np.sqrt(np.sum(sin_a * sin_a) * np.sum(sin_b * sin_b)) return rho def rayleightest(data, axis=None, weights=None): """Performs the Rayleigh test of uniformity. This test is used to identify a non-uniform distribution, i.e. it is designed for detecting an unimodal deviation from uniformity. More precisely, it assumes the following hypotheses: - H0 (null hypothesis): The population is distributed uniformly around the circle. - H1 (alternative hypothesis): The population is not distributed uniformly around the circle. Small p-values suggest to reject the null hypothesis. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which the Rayleigh test will be performed. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``np.sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- p-value : float or `~astropy.units.Quantity` ['dimensionless'] Examples -------- >>> import numpy as np >>> from astropy.stats import rayleightest >>> from astropy import units as u >>> data = np.array([130, 90, 0, 145])*u.deg >>> rayleightest(data) # doctest: +FLOAT_CMP <Quantity 0.2563487733797317> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> .. [3] M. Chirstman., C. Miller. "Testing a Sample of Directions for Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007. .. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied Statistics. 1983. 
<http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf> """ n = np.size(data, axis=axis) Rbar = _length(data, 1, 0.0, axis, weights) z = n * Rbar * Rbar # see [3] and [4] for the formulae below tmp = 1.0 if n < 50: tmp = ( 1.0 + (2.0 * z - z * z) / (4.0 * n) - (24.0 * z - 132.0 * z**2.0 + 76.0 * z**3.0 - 9.0 * z**4.0) / (288.0 * n * n) ) p_value = np.exp(-z) * tmp return p_value def vtest(data, mu=0.0, axis=None, weights=None): """Performs the Rayleigh test of uniformity where the alternative hypothesis H1 is assumed to have a known mean angle ``mu``. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. mu : float or `~astropy.units.Quantity` ['angle'], optional Mean angle. Assumed to be known. axis : int, optional Axis along which the V test will be performed. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- p-value : float or `~astropy.units.Quantity` ['dimensionless'] Examples -------- >>> import numpy as np >>> from astropy.stats import vtest >>> from astropy import units as u >>> data = np.array([130, 90, 0, 145])*u.deg >>> vtest(data) # doctest: +FLOAT_CMP <Quantity 0.6223678199713766> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> .. [3] M. Chirstman., C. Miller. "Testing a Sample of Directions for Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007. """ from scipy.stats import norm if weights is None: weights = np.ones((1,)) try: weights = np.broadcast_to(weights, data.shape) except ValueError: raise ValueError("Weights and data have inconsistent shape.") n = np.size(data, axis=axis) R0bar = np.sum(weights * np.cos(data - mu), axis) / np.sum(weights, axis) z = np.sqrt(2.0 * n) * R0bar pz = norm.cdf(z) fz = norm.pdf(z) # see reference [3] p_value = ( 1 - pz + fz * ( (3 * z - z**3) / (16.0 * n) + (15 * z + 305 * z**3 - 125 * z**5 + 9 * z**7) / (4608.0 * n * n) ) ) return p_value def _A1inv(x): # Approximation for _A1inv(x) according R Package 'CircStats' # See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4) if 0 <= x < 0.53: return 2.0 * x + x * x * x + (5.0 * x**5) / 6.0 elif x < 0.85: return -0.4 + 1.39 * x + 0.43 / (1.0 - x) else: return 1.0 / (x * x * x - 4.0 * x * x + 3.0 * x) def vonmisesmle(data, axis=None): """Computes the Maximum Likelihood Estimator (MLE) for the parameters of the von Mises distribution. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which the mle will be computed. Returns ------- mu : float or `~astropy.units.Quantity` The mean (aka location parameter). kappa : float or `~astropy.units.Quantity` ['dimensionless'] The concentration parameter. 
Examples -------- >>> import numpy as np >>> from astropy.stats import vonmisesmle >>> from astropy import units as u >>> data = np.array([130, 90, 0, 145])*u.deg >>> vonmisesmle(data) # doctest: +FLOAT_CMP (<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>) References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ mu = circmean(data, axis=axis) kappa = _A1inv(np.mean(np.cos(data - mu), axis)) return mu, kappa
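# Minimal usage sketch (illustrative only; it assumes numpy and an installed
# astropy, and the angles are arbitrary): for directions straddling the
# 0/360 deg wrap-around the circular mean is well behaved while the ordinary
# arithmetic mean is misleading.
if __name__ == "__main__":
    import numpy as np

    from astropy import units as u
    from astropy.stats import circmean, circstd, rayleightest

    angles = np.array([355.0, 5.0, 10.0, 350.0]) * u.deg
    print("circular mean:", circmean(angles))    # close to 0 deg
    print("arithmetic mean:", np.mean(angles))   # 180 deg, clearly misleading
    print("circular std:", circstd(angles))
    print("Rayleigh test p-value:", rayleightest(angles))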
19d5f76c728a4025828987c9493a2ca4d0fafe8c0811613d130d01d703dbcbde
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements functions and classes for spatial statistics. """ import math import numpy as np __all__ = ["RipleysKEstimator"] class RipleysKEstimator: """ Estimators for Ripley's K function for two-dimensional spatial data. See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and practical aspects of those estimators. Parameters ---------- area : float Area of study from which the points were observed. x_max, y_max : float, float, optional Maximum rectangular coordinates of the area of study. Required if ``mode == 'translation'`` or ``mode == 'ohser'``. x_min, y_min : float, float, optional Minimum rectangular coordinates of the area of study. Required if ``mode == 'var-width'`` or ``mode == 'ohser'``. Examples -------- >>> import numpy as np >>> from matplotlib import pyplot as plt # doctest: +SKIP >>> from astropy.stats import RipleysKEstimator >>> z = np.random.uniform(low=5, high=10, size=(100, 2)) >>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10, ... x_min=5, y_min=5) >>> r = np.linspace(0, 2.5, 100) >>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP >>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP >>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP >>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP >>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP >>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP References ---------- .. [1] Peebles, P.J.E. *The large scale structure of the universe*. <https://ui.adsabs.harvard.edu/abs/1980lssu.book.....P> .. [2] Spatial descriptive statistics. <https://en.wikipedia.org/wiki/Spatial_descriptive_statistics> .. [3] Package spatstat. <https://cran.r-project.org/web/packages/spatstat/spatstat.pdf> .. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data, Wiley, New York. .. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and Point Fields, Akademie Verlag GmbH, Chichester. """ def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None): self.area = area self.x_max = x_max self.y_max = y_max self.x_min = x_min self.y_min = y_min @property def area(self): return self._area @area.setter def area(self, value): if isinstance(value, (float, int)) and value > 0: self._area = value else: raise ValueError(f"area is expected to be a positive number. Got {value}.") @property def y_max(self): return self._y_max @y_max.setter def y_max(self, value): if value is None or isinstance(value, (float, int)): self._y_max = value else: raise ValueError( f"y_max is expected to be a real number or None. Got {value}." ) @property def x_max(self): return self._x_max @x_max.setter def x_max(self, value): if value is None or isinstance(value, (float, int)): self._x_max = value else: raise ValueError( f"x_max is expected to be a real number or None. Got {value}." ) @property def y_min(self): return self._y_min @y_min.setter def y_min(self, value): if value is None or isinstance(value, (float, int)): self._y_min = value else: raise ValueError(f"y_min is expected to be a real number. Got {value}.") @property def x_min(self): return self._x_min @x_min.setter def x_min(self, value): if value is None or isinstance(value, (float, int)): self._x_min = value else: raise ValueError(f"x_min is expected to be a real number.
Got {value}.") def __call__(self, data, radii, mode="none"): return self.evaluate(data=data, radii=radii, mode=mode) def _pairwise_diffs(self, data): npts = len(data) diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double) k = 0 for i in range(npts - 1): size = npts - i - 1 diff[k : k + size] = abs(data[i] - data[i + 1 :]) k += size return diff def poisson(self, radii): """ Evaluates the Ripley K function for the homogeneous Poisson process, also known as Complete State of Randomness (CSR). Parameters ---------- radii : 1D array Set of distances in which Ripley's K function will be evaluated. Returns ------- output : 1D array Ripley's K function evaluated at ``radii``. """ return np.pi * radii * radii def Lfunction(self, data, radii, mode="none"): """ Evaluates the L function at ``radii``. For parameter description see ``evaluate`` method. """ return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi) def Hfunction(self, data, radii, mode="none"): """ Evaluates the H function at ``radii``. For parameter description see ``evaluate`` method. """ return self.Lfunction(data, radii, mode=mode) - radii def evaluate(self, data, radii, mode="none"): """ Evaluates the Ripley K estimator for a given set of values ``radii``. Parameters ---------- data : 2D array Set of observed points in as a n by 2 array which will be used to estimate Ripley's K function. radii : 1D array Set of distances in which Ripley's K estimator will be evaluated. Usually, it's common to consider max(radii) < (area/2)**0.5. mode : str Keyword which indicates the method for edge effects correction. Available methods are 'none', 'translation', 'ohser', 'var-width', and 'ripley'. * 'none' this method does not take into account any edge effects whatsoever. * 'translation' computes the intersection of rectangular areas centered at the given points provided the upper bounds of the dimensions of the rectangular area of study. It assumes that all the points lie in a bounded rectangular region satisfying x_min < x_i < x_max; y_min < y_i < y_max. A detailed description of this method can be found on ref [4]. * 'ohser' this method uses the isotropized set covariance function of the window of study as a weight to correct for edge-effects. A detailed description of this method can be found on ref [4]. * 'var-width' this method considers the distance of each observed point to the nearest boundary of the study window as a factor to account for edge-effects. See [3] for a brief description of this method. * 'ripley' this method is known as Ripley's edge-corrected estimator. The weight for edge-correction is a function of the proportions of circumferences centered at each data point which crosses another data point of interest. See [3] for a detailed description of this method. Returns ------- ripley : 1D array Ripley's K function estimator evaluated at ``radii``. """ data = np.asarray(data) if not data.shape[1] == 2: raise ValueError( "data must be an n by 2 array, where n is the " "number of observed points." ) npts = len(data) ripley = np.zeros(len(radii)) if mode == "none": diff = self._pairwise_diffs(data) distances = np.hypot(diff[:, 0], diff[:, 1]) for r in range(len(radii)): ripley[r] = (distances < radii[r]).sum() ripley = self.area * 2.0 * ripley / (npts * (npts - 1)) # eq. 
15.11 Stoyan book page 283 elif mode == "translation": diff = self._pairwise_diffs(data) distances = np.hypot(diff[:, 0], diff[:, 1]) intersec_area = ((self.x_max - self.x_min) - diff[:, 0]) * ( (self.y_max - self.y_min) - diff[:, 1] ) for r in range(len(radii)): dist_indicator = distances < radii[r] ripley[r] = ((1 / intersec_area) * dist_indicator).sum() ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley # Stoyan book page 123 and eq 15.13 elif mode == "ohser": diff = self._pairwise_diffs(data) distances = np.hypot(diff[:, 0], diff[:, 1]) a = self.area b = max( (self.y_max - self.y_min) / (self.x_max - self.x_min), (self.x_max - self.x_min) / (self.y_max - self.y_min), ) x = distances / math.sqrt(a / b) u = np.sqrt((x * x - 1) * (x > 1)) v = np.sqrt((x * x - b**2) * (x < math.sqrt(b**2 + 1)) * (x > b)) c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u) c3 = ( 2 * np.arcsin( ((b - u * v) / (x * x)) * (x > b) * (x < math.sqrt(b**2 + 1)) ) + 2 * u + 2 * v / b - b - (1 + x * x) / b ) cov_func = (a / np.pi) * ( c1 * (x >= 0) * (x <= 1) + c2 * (x > 1) * (x <= b) + c3 * (b < x) * (x < math.sqrt(b**2 + 1)) ) for r in range(len(radii)): dist_indicator = distances < radii[r] ripley[r] = ((1 / cov_func) * dist_indicator).sum() ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley # Cressie book eq 8.2.20 page 616 elif mode == "var-width": lt_dist = np.minimum( np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]), np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min), ) for r in range(len(radii)): for i in range(npts): for j in range(npts): if i != j: diff = abs(data[i] - data[j]) dist = math.sqrt((diff * diff).sum()) if dist < radii[r] < lt_dist[i]: ripley[r] = ripley[r] + 1 lt_dist_sum = (lt_dist > radii[r]).sum() if not lt_dist_sum == 0: ripley[r] = ripley[r] / lt_dist_sum ripley = self.area * ripley / npts # Cressie book eq 8.4.22 page 640 elif mode == "ripley": hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double) ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double) for k in range(npts - 1): min_hor_dist = min(self.x_max - data[k][0], data[k][0] - self.x_min) min_ver_dist = min(self.y_max - data[k][1], data[k][1] - self.y_min) start = (k * (2 * (npts - 1) - (k - 1))) // 2 end = ((k + 1) * (2 * (npts - 1) - k)) // 2 hor_dist[start:end] = min_hor_dist * np.ones(npts - 1 - k) ver_dist[start:end] = min_ver_dist * np.ones(npts - 1 - k) diff = self._pairwise_diffs(data) dist = np.hypot(diff[:, 0], diff[:, 1]) dist_ind = dist <= np.hypot(hor_dist, ver_dist) w1 = ( 1 - ( np.arccos(np.minimum(ver_dist, dist) / dist) + np.arccos(np.minimum(hor_dist, dist) / dist) ) / np.pi ) w2 = ( 3 / 4 - 0.5 * ( np.arccos(ver_dist / dist * ~dist_ind) + np.arccos(hor_dist / dist * ~dist_ind) ) / np.pi ) weight = dist_ind * w1 + ~dist_ind * w2 for r in range(len(radii)): ripley[r] = ((dist < radii[r]) / weight).sum() ripley = self.area * 2.0 * ripley / (npts * (npts - 1)) else: raise ValueError(f"mode {mode} is not implemented.") return ripley
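# Minimal usage sketch (illustrative only; it assumes numpy and an installed
# astropy, and the study window and sample size are arbitrary): compare the
# Ripley-corrected K estimate for uniformly scattered points against the
# Poisson (CSR) expectation.
if __name__ == "__main__":
    import numpy as np

    from astropy.stats import RipleysKEstimator

    rng = np.random.default_rng(1)
    points = rng.uniform(low=0.0, high=10.0, size=(200, 2))

    kest = RipleysKEstimator(area=100.0, x_min=0.0, x_max=10.0, y_min=0.0, y_max=10.0)
    radii = np.linspace(0.1, 2.5, 10)
    print("Poisson expectation:", kest.poisson(radii))
    print("Ripley estimate:", kest(data=points, radii=radii, mode="ripley"))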
b4567fe29b36bad15691f39d0bc05543eb609d0e90c10150286614ea6685b478
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple statistical algorithms that are straightforwardly implemented as a single python function (or family of functions). This module should generally not be used directly. Everything in `__all__` is imported into `astropy.stats`, and hence that package should be used for access. """ import math import numpy as np import astropy.units as u from . import _stats __all__ = [ "gaussian_fwhm_to_sigma", "gaussian_sigma_to_fwhm", "binom_conf_interval", "binned_binom_proportion", "poisson_conf_interval", "median_absolute_deviation", "mad_std", "signal_to_noise_oir_ccd", "bootstrap", "kuiper", "kuiper_two", "kuiper_false_positive_probability", "cdf_from_intervals", "interval_overlap_length", "histogram_intervals", "fold_intervals", ] __doctest_skip__ = ["binned_binom_proportion"] __doctest_requires__ = { "binom_conf_interval": ["scipy"], "poisson_conf_interval": ["scipy"], } gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0)) """ Factor with which to multiply Gaussian 1-sigma standard deviation to convert it to full width at half maximum (FWHM). """ gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm """ Factor with which to multiply Gaussian full width at half maximum (FWHM) to convert it to 1-sigma standard deviation. """ def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"): r"""Binomial proportion confidence interval given k successes, n trials. Parameters ---------- k : int or numpy.ndarray Number of successes (0 <= ``k`` <= ``n``). n : int or numpy.ndarray Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays, they must have the same shape. confidence_level : float, optional Desired probability content of interval. Default is 0.68269, corresponding to 1 sigma in a 1-dimensional Gaussian distribution. Confidence level must be in range [0, 1]. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used for confidence interval. See notes for details. The ``'wilson'`` and ``'jeffreys'`` intervals generally give similar results, while 'flat' is somewhat different, especially for small values of ``n``. ``'wilson'`` should be somewhat faster than ``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is ``'wilson'``. Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``k``, ``n``. Notes ----- In situations where a probability of success is not known, it can be estimated from a number of trials (n) and number of observed successes (k). For example, this is done in Monte Carlo experiments designed to estimate a detection efficiency. It is simple to take the sample proportion of successes (k/n) as a reasonable best estimate of the true probability :math:`\epsilon`. However, deriving an accurate confidence interval on :math:`\epsilon` is non-trivial. There are several formulas for this interval (see [1]_). Four intervals are implemented here: **1. The Wilson Interval.** This interval, attributed to Wilson [2]_, is given by .. 
math:: CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2} \pm \frac{\kappa n^{1/2}}{n + \kappa^2} ((\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2} where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the number of standard deviations corresponding to the desired confidence interval for a *normal* distribution (for example, 1.0 for a confidence interval of 68.269%). For a confidence interval of 100(1 - :math:`\alpha`)%, .. math:: \kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha). **2. The Jeffreys Interval.** This interval is derived by applying Bayes' theorem to the binomial distribution with the noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys prior is the Beta distribution, Beta(1/2, 1/2), which has the density function .. math:: f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}. The justification for this prior is that it is invariant under reparameterizations of the binomial proportion. The posterior density function is also a Beta distribution: Beta(k + 1/2, n - k + 1/2). The interval is then chosen so that it is *equal-tailed*: Each tail (outside the interval) contains :math:`\alpha`/2 of the posterior probability, and the interval itself contains 1 - :math:`\alpha`. This interval must be calculated numerically. Additionally, when k = 0 the lower limit is set to 0 and when k = n the upper limit is set to 1, so that in these cases, there is only one tail containing :math:`\alpha`/2 and the interval itself contains 1 - :math:`\alpha`/2 rather than the nominal 1 - :math:`\alpha`. **3. A Flat prior.** This is similar to the Jeffreys interval, but uses a flat (uniform) prior on the binomial proportion over the range 0 to 1 rather than the reparametrization-invariant Jeffreys prior. The posterior density function is a Beta distribution: Beta(k + 1, n - k + 1). The same comments about the nature of the interval (equal-tailed, etc.) also apply to this option. **4. The Wald Interval.** This interval is given by .. math:: CI_{\rm Wald} = \hat{\epsilon} \pm \kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}} The Wald interval gives acceptable results in some limiting cases. Particularly, when n is very large, and the true proportion :math:`\epsilon` is not "too close" to 0 or 1. However, as the later is not verifiable when trying to estimate :math:`\epsilon`, this is not very helpful. Its use is not recommended, but it is provided here for comparison purposes due to its prevalence in everyday practical statistics. This function requires ``scipy`` for all interval types. References ---------- .. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001). "Interval Estimation for a Binomial Proportion". Statistical Science 16 (2): 101-133. doi:10.1214/ss/1009213286 .. [2] Wilson, E. B. (1927). "Probable inference, the law of succession, and statistical inference". Journal of the American Statistical Association 22: 209-212. .. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186 (1007): 453-461. doi:10.1098/rspa.1946.0056 .. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford University Press, 3rd edition. ISBN 978-0198503682 Examples -------- Integer inputs return an array with shape (2,): >>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP array([0.57921724, 0.92078259]) Arrays of arbitrary dimension are supported. 
The Wilson and Jeffreys intervals give similar results, even for small k, n: >>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP array([[0.07921741, 0.21597328], [0.42078276, 0.61736012]]) >>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP array([[0.0842525 , 0.21789949], [0.42218001, 0.61753691]]) >>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP array([[0.12139799, 0.24309021], [0.45401727, 0.61535699]]) In contrast, the Wald interval gives poor results for small k, n. For k = 0 or k = n, the interval always has zero length. >>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP array([[0.02111437, 0.18091075], [0.37888563, 0.61908925]]) For confidence intervals approaching 1, the Wald interval for 0 < k < n can give intervals that extend outside [0, 1]: >>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP array([[-0.26077835, -0.16433593], [ 0.66077835, 0.96433593]]) """ if confidence_level < 0.0 or confidence_level > 1.0: raise ValueError("confidence_level must be between 0. and 1.") alpha = 1.0 - confidence_level k = np.asarray(k).astype(int) n = np.asarray(n).astype(int) if (n <= 0).any(): raise ValueError("n must be positive") if (k < 0).any() or (k > n).any(): raise ValueError("k must be in {0, 1, .., n}") if interval == "wilson" or interval == "wald": from scipy.special import erfinv kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows. k = k.astype(float) n = n.astype(float) p = k / n if interval == "wilson": midpoint = (k + kappa**2 / 2.0) / (n + kappa**2) halflength = ( (kappa * np.sqrt(n)) / (n + kappa**2) * np.sqrt(p * (1 - p) + kappa**2 / (4 * n)) ) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) # Correct intervals out of range due to floating point errors. conf_interval[conf_interval < 0.0] = 0.0 conf_interval[conf_interval > 1.0] = 1.0 else: midpoint = p halflength = kappa * np.sqrt(p * (1.0 - p) / n) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) elif interval == "jeffreys" or interval == "flat": from scipy.special import betaincinv if interval == "jeffreys": lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha) upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha) else: lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha) upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha) # Set lower or upper bound to k/n when k/n = 0 or 1 # We have to treat the special case of k/n being scalars, # which is an ugly kludge if lowerbound.ndim == 0: if k == 0: lowerbound = 0.0 elif k == n: upperbound = 1.0 else: lowerbound[k == 0] = 0 upperbound[k == n] = 1 conf_interval = np.array([lowerbound, upperbound]) else: raise ValueError(f"Unrecognized interval: {interval:s}") return conf_interval def binned_binom_proportion( x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson" ): """Binomial proportion and confidence interval in bins of a continuous variable ``x``. Given a set of datapoint pairs where the ``x`` values are continuously distributed and the ``success`` values are binomial ("success / failure" or "true / false"), place the pairs into bins according to ``x`` value and calculate the binomial proportion (fraction of successes) and confidence interval in each bin. Parameters ---------- x : sequence Values. success : sequence of bool Success (`True`) or failure (`False`) corresponding to each value in ``x``. 
Must be same length as ``x``. bins : int or sequence of scalar, optional If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths (in this case, 'range' is ignored). range : (float, float), optional The lower and upper range of the bins. If `None` (default), the range is set to ``(x.min(), x.max())``. Values outside the range are ignored. confidence_level : float, optional Must be in range [0, 1]. Desired probability content in the confidence interval ``(p - perr[0], p + perr[1])`` in each bin. Default is 0.68269. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used to calculate confidence interval on the binomial proportion in each bin. See `binom_conf_interval` for definition of the intervals. The 'wilson', 'jeffreys', and 'flat' intervals generally give similar results. 'wilson' should be somewhat faster, while 'jeffreys' and 'flat' are marginally superior, but differ in the assumed prior. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is 'wilson'. Returns ------- bin_ctr : ndarray Central value of bins. Bins without any entries are not returned. bin_halfwidth : ndarray Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and ``bin_ctr + bins_halfwidth`` give the left and right side of each bin, respectively. p : ndarray Efficiency in each bin. perr : ndarray 2-d array of shape (2, len(p)) representing the upper and lower uncertainty on p in each bin. Notes ----- This function requires ``scipy`` for all interval types. See Also -------- binom_conf_interval : Function used to estimate confidence interval in each bin. Examples -------- Suppose we wish to estimate the efficiency of a survey in detecting astronomical sources as a function of magnitude (i.e., the probability of detecting a source given its magnitude). In a realistic case, we might prepare a large number of sources with randomly selected magnitudes, inject them into simulated images, and then record which were detected at the end of the reduction pipeline. As a toy example, we generate 100 data points with randomly selected magnitudes between 20 and 30 and "observe" them with a known detection function (here, the error function, with 50% detection probability at magnitude 25): >>> from scipy.special import erf >>> from scipy.stats.distributions import binom >>> def true_efficiency(x): ... return 0.5 - 0.5 * erf((x - 25.) / 2.) >>> mag = 20. + 10. * np.random.rand(100) >>> detected = binom.rvs(1, true_efficiency(mag)) >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) 
plt.title('Detection efficiency vs magnitude') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() The above example uses the Wilson confidence interval to calculate the uncertainty ``perr`` in each bin (see the definition of various confidence intervals in `binom_conf_interval`). A commonly used alternative is the Wald interval. However, the Wald interval can give nonsensical uncertainties when the efficiency is near 0 or 1, and is therefore **not** recommended. As an illustration, the following example shows the same data as above but uses the Wald interval rather than the Wilson interval to calculate ``perr``: >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, ... interval='wald') >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, interval='wald') plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) plt.title('The Wald interval can give nonsensical uncertainties') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() """ x = np.ravel(x) success = np.ravel(success).astype(bool) if x.shape != success.shape: raise ValueError("sizes of x and success must match") # Put values into a histogram (`n`). Put "successful" values # into a second histogram (`k`) with identical binning. n, bin_edges = np.histogram(x, bins=bins, range=range) k, bin_edges = np.histogram(x[success], bins=bin_edges) bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0 bin_halfwidth = bin_ctr - bin_edges[:-1] # Remove bins with zero entries. valid = n > 0 bin_ctr = bin_ctr[valid] bin_halfwidth = bin_halfwidth[valid] n = n[valid] k = k[valid] p = k / n bounds = binom_conf_interval( k, n, confidence_level=confidence_level, interval=interval ) perr = np.abs(bounds - p) return bin_ctr, bin_halfwidth, p, perr def _check_poisson_conf_inputs(sigma, background, confidence_level, name): if sigma != 1: raise ValueError(f"Only sigma=1 supported for interval {name}") if background != 0: raise ValueError(f"background not supported for interval {name}") if confidence_level is not None: raise ValueError(f"confidence_level not supported for interval {name}") def poisson_conf_interval( n, interval="root-n", sigma=1, background=0, confidence_level=None ): r"""Poisson parameter confidence interval given observed counts Parameters ---------- n : int or numpy.ndarray Number of counts (0 <= ``n``). interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional Formula used for confidence interval. See notes for details. Default is ``'root-n'``. sigma : float, optional Number of sigma for confidence interval; only supported for the 'frequentist-confidence' mode. background : float, optional Number of counts expected from the background; only supported for the 'kraft-burrows-nousek' mode. 
This number is assumed to be determined from a large region so that the uncertainty on its value is negligible. confidence_level : float, optional Confidence level between 0 and 1; only supported for the 'kraft-burrows-nousek' mode. Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``n``. Notes ----- The "right" confidence interval to use for Poisson data is a matter of debate. The CDF working group `recommends <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ using root-n throughout, largely in the interest of comprehensibility, but discusses other possibilities. The ATLAS group also discusses several possibilities but concludes that no single representation is suitable for all cases. The suggestion has also been `floated <https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error bars should be attached to theoretical predictions instead of observed data, which this function will not help with (but it's easy; then you really should use the square root of the theoretical prediction). The intervals implemented here are: **1. 'root-n'** This is a very widely used standard rule derived from the maximum-likelihood estimator for the mean of the Poisson process. While it produces questionable results for small n and outright wrong results for n=0, it is standard enough that people are (supposedly) used to interpreting these wonky values. The interval is .. math:: CI = (n-\sqrt{n}, n+\sqrt{n}) **2. 'root-n-0'** This is identical to the above except that where n is zero the interval returned is (0,1). **3. 'pearson'** This is an only-slightly-more-complicated rule based on Pearson's chi-squared rule (as `explained <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by the CDF working group). It also has the nice feature that if your theory curve touches an endpoint of the interval, then your data point is indeed one sigma away. The interval is .. math:: CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25}) **4. 'sherpagehrels'** This rule is used by default in the fitting package 'sherpa'. The `documentation <https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims it is based on a numerical approximation published in `Gehrels (1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it does not actually appear there. It is symmetrical, and while the upper limits are within about 1% of those given by 'frequentist-confidence', the lower limits can be badly wrong. The interval is .. math:: CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75}) **5. 'frequentist-confidence'** These are frequentist central confidence intervals: .. math:: CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n), 0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1))) where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square distribution with the indicated number of degrees of freedom and :math:`\alpha` is the one-tailed probability of the normal distribution (at the point given by the parameter 'sigma'). See `Maxwell (2011) <https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further details. **6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows for the presence of a known background :math:`B` in the source signal :math:`N`. For a given confidence level :math:`CL` the confidence interval :math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by: .. 
math:: CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS where the function :math:`f_{N,B}` is: .. math:: f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!} and the normalization constant :math:`C`: .. math:: C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1} = \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1} See `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further details. These formulas implement a positive, uniform prior. `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this choice in more detail and show that the problem is relatively insensitive to the choice of prior. This function has an optional dependency: Either `Scipy <https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need to be available (Scipy works only for N < 100). This code is very intense numerically, which makes it much slower than the other methods, in particular for large count numbers (above 1000 even with ``mpmath``). Fortunately, some of the other methods or a Gaussian approximation usually work well in this regime. Examples -------- >>> poisson_conf_interval(np.arange(10), interval='root-n').T array([[ 0. , 0. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='root-n-0').T array([[ 0. , 1. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='pearson').T array([[ 0. , 1. ], [ 0.38196601, 2.61803399], [ 1. , 4. ], [ 1.69722436, 5.30277564], [ 2.43844719, 6.56155281], [ 3.20871215, 7.79128785], [ 4. , 9. ], [ 4.8074176 , 10.1925824 ], [ 5.62771868, 11.37228132], [ 6.45861873, 12.54138127]]) >>> poisson_conf_interval( ... np.arange(10), interval='frequentist-confidence').T array([[ 0. , 1.84102165], [ 0.17275378, 3.29952656], [ 0.70818544, 4.63785962], [ 1.36729531, 5.91818583], [ 2.08566081, 7.16275317], [ 2.84030886, 8.38247265], [ 3.62006862, 9.58364155], [ 4.41852954, 10.77028072], [ 5.23161394, 11.94514152], [ 6.05653896, 13.11020414]]) >>> poisson_conf_interval( ... 7, interval='frequentist-confidence').T array([ 4.41852954, 10.77028072]) >>> poisson_conf_interval( ... 10, background=1.5, confidence_level=0.95, ... 
interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP array([[ 3.47894005, 16.113329533]]) """ if not np.isscalar(n): n = np.asanyarray(n) if interval == "root-n": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) elif interval == "root-n-0": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) if np.isscalar(n): if n == 0: conf_interval[1] = 1 else: conf_interval[1, n == 0] = 1 elif interval == "pearson": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array( [n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)] ) elif interval == "sherpagehrels": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)]) elif interval == "frequentist-confidence": _check_poisson_conf_inputs(1.0, background, confidence_level, interval) import scipy.stats alpha = scipy.stats.norm.sf(sigma) conf_interval = np.array( [ 0.5 * scipy.stats.chi2(2 * n).ppf(alpha), 0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha), ] ) if np.isscalar(n): if n == 0: conf_interval[0] = 0 else: conf_interval[0, n == 0] = 0 elif interval == "kraft-burrows-nousek": # Deprecation warning in Python 3.9 when N is float, so we force int, # see https://github.com/astropy/astropy/issues/10832 if np.isscalar(n): if not isinstance(n, int): raise TypeError("Number of counts must be integer.") elif not issubclass(n.dtype.type, np.integer): raise TypeError("Number of counts must be integer.") if confidence_level is None: raise ValueError( f"Set confidence_level for method {interval}. (sigma is ignored.)" ) confidence_level = np.asanyarray(confidence_level) if np.any(confidence_level <= 0) or np.any(confidence_level >= 1): raise ValueError("confidence_level must be a number between 0 and 1.") background = np.asanyarray(background) if np.any(background < 0): raise ValueError("Background must be >= 0.") conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)( n, background, confidence_level ) conf_interval = np.vstack(conf_interval) else: raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}") return conf_interval def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False): """ Calculate the median absolute deviation (MAD). The MAD is defined as ``median(abs(a - median(a)))``. Parameters ---------- data : array-like Input array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the MADs are computed. The default (`None`) is to compute the MAD of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad : float or `~numpy.ndarray` The median absolute deviation of the input array. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. 
Examples -------- Generate random variates from a Gaussian distribution and return the median absolute deviation for that distribution:: >>> import numpy as np >>> from astropy.stats import median_absolute_deviation >>> rand = np.random.default_rng(12345) >>> from numpy.random import randn >>> mad = median_absolute_deviation(rand.standard_normal(1000)) >>> print(mad) # doctest: +FLOAT_CMP 0.6829504282771885 See Also -------- mad_std """ if func is None: # Check if the array has a mask and if so use np.ma.median # See https://github.com/numpy/numpy/issues/7330 why using np.ma.median # for normal arrays should not be done (summary: np.ma.median always # returns an masked array even if the result should be scalar). (#4658) if isinstance(data, np.ma.MaskedArray): is_masked = True func = np.ma.median if ignore_nan: data = np.ma.masked_where(np.isnan(data), data, copy=True) elif ignore_nan: is_masked = False func = np.nanmedian else: is_masked = False func = np.median # drops units if result is NaN else: is_masked = None data = np.asanyarray(data) # np.nanmedian has `keepdims`, which is a good option if we're not allowing # user-passed functions here data_median = func(data, axis=axis) # this conditional can be removed after this PR is merged: # https://github.com/astropy/astropy/issues/12165 if ( isinstance(data, u.Quantity) and func is np.median and data_median.ndim == 0 and np.isnan(data_median) ): data_median = data.__array_wrap__(data_median) # broadcast the median array before subtraction if axis is not None: data_median = np.expand_dims(data_median, axis=axis) result = func(np.abs(data - data_median), axis=axis, overwrite_input=True) # this conditional can be removed after this PR is merged: # https://github.com/astropy/astropy/issues/12165 if ( isinstance(data, u.Quantity) and func is np.median and result.ndim == 0 and np.isnan(result) ): result = data.__array_wrap__(result) if axis is None and np.ma.isMaskedArray(result): # return scalar version result = result.item() elif np.ma.isMaskedArray(result) and not is_masked: # if the input array was not a masked array, we don't want to return a # masked array result = result.filled(fill_value=np.nan) return result def mad_std(data, axis=None, func=None, ignore_nan=False): r""" Calculate a robust standard deviation using the `median absolute deviation (MAD) <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The standard deviation estimator is given by: .. math:: \sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)} \approx 1.4826 \ \textrm{MAD} where :math:`\Phi^{-1}(P)` is the normal inverse cumulative distribution function evaluated at probability :math:`P = 3/4`. Parameters ---------- data : array-like Data array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the robust standard deviations are computed. The default (`None`) is to compute the robust standard deviation of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad_std : float or `~numpy.ndarray` The robust standard deviation of the input data. 
If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. Examples -------- >>> import numpy as np >>> from astropy.stats import mad_std >>> rand = np.random.default_rng(12345) >>> madstd = mad_std(rand.normal(5, 2, (100, 100))) >>> print(madstd) # doctest: +FLOAT_CMP 1.984147963351707 See Also -------- biweight_midvariance, biweight_midcovariance, median_absolute_deviation """ # NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602 MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan) return MAD * 1.482602218505602 def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0): """Computes the signal to noise ratio for source being observed in the optical/IR using a CCD. Parameters ---------- t : float or numpy.ndarray CCD integration time in seconds source_eps : float Number of electrons (photons) or DN per second in the aperture from the source. Note that this should already have been scaled by the filter transmission and the quantum efficiency of the CCD. If the input is in DN, then be sure to set the gain to the proper value for the CCD. If the input is in electrons per second, then keep the gain as its default of 1.0. sky_eps : float Number of electrons (photons) or DN per second per pixel from the sky background. Should already be scaled by filter transmission and QE. This must be in the same units as source_eps for the calculation to make sense. dark_eps : float Number of thermal electrons per second per pixel. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. rd : float Read noise of the CCD in electrons. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. npix : float Size of the aperture in pixels gain : float, optional Gain of the CCD. In units of electrons per DN. Returns ------- SNR : float or numpy.ndarray Signal to noise ratio calculated from the inputs """ signal = t * source_eps * gain noise = np.sqrt( t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2 ) return signal / noise def bootstrap(data, bootnum=100, samples=None, bootfunc=None): """Performs bootstrap resampling on numpy arrays. Bootstrap resampling is used to understand confidence intervals of sample estimates. This function returns versions of the dataset resampled with replacement ("case bootstrapping"). These can all be run through a function or statistic to produce a distribution of values which can then be used to find the confidence intervals. Parameters ---------- data : ndarray N-D array. The bootstrap resampling will be performed on the first index, so the first index should access the relevant information to be bootstrapped. bootnum : int, optional Number of bootstrap resamples samples : int, optional Number of samples in each resample. The default `None` sets samples to the number of datapoints bootfunc : function, optional Function to reduce the resampled data. Each bootstrap resample will be put through this function and the results returned. If `None`, the bootstrapped data will be returned Returns ------- boot : ndarray If bootfunc is None, then each row is a bootstrap resample of the data. If bootfunc is specified, then the columns will correspond to the outputs of bootfunc. 
Examples -------- Obtain a twice resampled array: >>> from astropy.stats import bootstrap >>> import numpy as np >>> from astropy.utils import NumpyRNGContext >>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2) ... >>> bootresult # doctest: +FLOAT_CMP array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.], [3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]]) >>> bootresult.shape (2, 10) Obtain a statistic on the array >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean) ... >>> bootresult # doctest: +FLOAT_CMP array([4. , 4.6]) Obtain a statistic with two outputs on the array >>> test_statistic = lambda x: (np.sum(x), np.mean(x)) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic) >>> bootresult # doctest: +FLOAT_CMP array([[40. , 4. ], [46. , 4.6], [35. , 3.5]]) >>> bootresult.shape (3, 2) Obtain a statistic with two outputs on the array, keeping only the first output >>> bootfunc = lambda x:test_statistic(x)[0] >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc) ... >>> bootresult # doctest: +FLOAT_CMP array([40., 46., 35.]) >>> bootresult.shape (3,) """ if samples is None: samples = data.shape[0] # make sure the input is sane if samples < 1 or bootnum < 1: raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.") if bootfunc is None: resultdims = (bootnum,) + (samples,) + data.shape[1:] else: # test number of outputs from bootfunc, avoid single outputs which are # array-like try: resultdims = (bootnum, len(bootfunc(data))) except TypeError: resultdims = (bootnum,) # create empty boot array boot = np.empty(resultdims) for i in range(bootnum): bootarr = np.random.randint(low=0, high=data.shape[0], size=samples) if bootfunc is None: boot[i] = data[bootarr] else: boot[i] = bootfunc(data[bootarr]) return boot def _scipy_kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server uses the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires :mod:`~scipy`. This implementation will cause Overflow Errors for about N > 100 (the exact limit depends on details of how scipy was compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that is slower, but can deal with arbitrarily high numbers since it is based on the `mpmath <http://mpmath.org/>`_ library. """ from math import exp from scipy.integrate import quad from scipy.optimize import brentq from scipy.special import factorial def eqn8(N, B): n = np.arange(N + 1, dtype=np.float64) return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n))) # The parameters of eqn8 do not vary between calls so we can calculate the # result once and reuse it. The same is True for the factorial of N. # eqn7 is called hundred times so "caching" these values yields a # significant speedup (factor 10). 
eqn8_res = eqn8(N, B) factorial_N = float(math.factorial(N)) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): return quad(eqn7, S_min, S_max, args=(N, B), limit=500) def find_s_min(S_max, N, B): """ Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. """ y_S_max = eqn7(S_max, N, B) if eqn7(0, N, B) >= y_S_max: return 0.0 else: return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out[0] - CL S_max = brentq(func, N - B, 100) S_min = find_s_min(S_max, N, B) return S_min, S_max def _mpmath_kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires the `mpmath <http://mpmath.org/>`_ library. See `~astropy.stats.scipy_poisson_upper_limit` for an implementation that is based on scipy and evaluates faster, but runs only to about N = 100. """ from mpmath import exp, factorial, findroot, fsum, mpf, power, quad # We convert these values to float. Because for some reason, # mpmath.mpf cannot convert from numpy.int64 N = mpf(float(N)) B = mpf(float(B)) CL = mpf(float(CL)) tol = 1e-4 def eqn8(N, B): sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)] return 1.0 / (exp(-B) * fsum(sumterms)) eqn8_res = eqn8(N, B) factorial_N = factorial(N) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): def eqn7NB(S): return eqn7(S, N, B) return quad(eqn7NB, [S_min, S_max]) def find_s_min(S_max, N, B): """ Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. """ y_S_max = eqn7(S_max, N, B) # If B > N, then N-B, the "most probable" values is < 0 # and thus s_min is certainly 0. # Note: For small N, s_max is also close to 0 and root finding # might find the wrong root, thus it is important to handle this # case here and return the analytical answer (s_min = 0). if (B >= N) or (eqn7(0, N, B) >= y_S_max): return 0.0 else: def eqn7ysmax(x): return eqn7(x, N, B) - y_S_max return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out - CL # Several numerical problems were found prevent the solvers from finding # the roots unless the starting values are very close to the final values. # Thus, this primitive, time-wasting, brute-force stepping here to get # an interval that can be fed into the ridder solver. 
s_max_guess = max(N - B, 1.0) while func(s_max_guess) < 0: s_max_guess += 1 S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol) S_min = find_s_min(S_max, N, B) return float(S_min), float(S_max) def _kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- This functions has an optional dependency: Either :mod:`scipy` or `mpmath <http://mpmath.org/>`_ need to be available. (Scipy only works for N < 100). """ from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY if HAS_SCIPY and N <= 100: try: return _scipy_kraft_burrows_nousek(N, B, CL) except OverflowError: if not HAS_MPMATH: raise ValueError("Need mpmath package for input numbers this large.") if HAS_MPMATH: return _mpmath_kraft_burrows_nousek(N, B, CL) raise ImportError("Either scipy or mpmath are required.") def kuiper_false_positive_probability(D, N): """Compute the false positive probability for the Kuiper statistic. Uses the set of four formulas described in Paltani 2004; they report the resulting function never underestimates the false positive probability but can be a bit high in the N=40..50 range. (They quote a factor 1.5 at the 1e-7 level.) Parameters ---------- D : float The Kuiper test score. N : float The effective sample size. Returns ------- fpp : float The probability of a score this large arising from the null hypothesis. Notes ----- Eq 7 of Paltani 2004 appears to incorrectly quote the original formula (Stephens 1965). This function implements the original formula, as it produces a result closer to Monte Carlo simulations. References ---------- .. [1] Paltani, S., "Searching for periods in X-ray observations using Kuiper's test. Application to the ROSAT PSPC archive", Astronomy and Astrophysics, v.240, p.789-790, 2004. .. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution and significance points", Biometrika, v.52, p.309, 1965. 
""" try: from scipy.special import comb, factorial except ImportError: # Retained for backwards compatibility with older versions of scipy # (factorial appears to have moved here in 0.14) from scipy.misc import comb, factorial if D < 0.0 or D > 2.0: raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test") if D < 2.0 / N: return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1) elif D < 3.0 / N: k = -(N * D - 1.0) / 2.0 r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0) a, b = -k + r, -k - r return 1 - ( factorial(N - 1) * (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b)) / N ** (N - 2) / (b - a) ) elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1): # NOTE: the upper limit of this sum is taken from Stephens 1965 t = np.arange(np.floor(N * (1 - D)) + 1) y = D + t / N Tt = y ** (t - 3) * ( y**3 * N - y**2 * t * (3 - 2 / N) + y * t * (t - 1) * (3 - 2 / N) / N - t * (t - 1) * (t - 2) / N**2 ) term1 = comb(N, t) term2 = (1 - D - t / N) ** (N - t - 1) # term1 is formally finite, but is approximated by numpy as np.inf for # large values, so we set them to zero manually when they would be # multiplied by zero anyway term1[(term1 == np.inf) & (term2 == 0)] = 0.0 final_term = Tt * term1 * term2 return final_term.sum() else: z = D * np.sqrt(N) # When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2) # underflows. Cutting off just before avoids triggering a (pointless) # underflow warning if `under="warn"`. ms = np.arange(1, 18.82 / z) S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum() S2 = ( ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2) ).sum() return S1 - 8 * D / 3 * S2 def kuiper(data, cdf=lambda x: x, args=()): """Compute the Kuiper statistic. Use the Kuiper statistic version of the Kolmogorov-Smirnov test to find the probability that a sample like ``data`` was drawn from the distribution whose CDF is given as ``cdf``. .. warning:: This will not work correctly for distributions that are actually discrete (Poisson, for example). Parameters ---------- data : array-like The data values. cdf : callable A callable to evaluate the CDF of the distribution being tested against. Will be called with a vector of all values at once. The default is a uniform distribution. args : list-like, optional Additional arguments to be supplied to cdf. Returns ------- D : float The raw statistic. fpp : float The probability of a D this large arising with a sample drawn from the distribution whose CDF is cdf. Notes ----- The Kuiper statistic resembles the Kolmogorov-Smirnov test in that it is nonparametric and invariant under reparameterizations of the data. The Kuiper statistic, in addition, is equally sensitive throughout the domain, and it is also invariant under cyclic permutations (making it particularly appropriate for analyzing circular data). Returns (D, fpp), where D is the Kuiper D number and fpp is the probability that a value as large as D would occur if data was drawn from cdf. .. warning:: The fpp is calculated only approximately, and it can be as much as 1.5 times the true value. Stephens 1970 claims this is more effective than the KS at detecting changes in the variance of a distribution; the KS is (he claims) more sensitive at detecting changes in the mean. If cdf was obtained from data by fitting, then fpp is not correct and it will be necessary to do Monte Carlo simulations to interpret D. D should normally be independent of the shape of CDF. References ---------- .. [1] Stephens, M. 
A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises and Related Statistics Without Extensive Tables", Journal of the Royal Statistical Society. Series B (Methodological), Vol. 32, No. 1. (1970), pp. 115-122. """ data = np.sort(data) cdfv = cdf(data, *args) N = len(data) D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax( (np.arange(N) + 1) / float(N) - cdfv ) return D, kuiper_false_positive_probability(D, N) def kuiper_two(data1, data2): """Compute the Kuiper statistic to compare two samples. Parameters ---------- data1 : array-like The first set of data values. data2 : array-like The second set of data values. Returns ------- D : float The raw test statistic. fpp : float The probability of obtaining two samples this different from the same distribution. .. warning:: The fpp is quite approximate, especially for small samples. """ data1 = np.sort(data1) data2 = np.sort(data2) (n1,) = data1.shape (n2,) = data2.shape common_type = np.find_common_type([], [data1.dtype, data2.dtype]) if not ( np.issubdtype(common_type, np.number) and not np.issubdtype(common_type, np.complexfloating) ): raise ValueError("kuiper_two only accepts real inputs") # nans, if any, are at the end after sorting. if np.isnan(data1[-1]) or np.isnan(data2[-1]): raise ValueError("kuiper_two only accepts non-nan inputs") D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type)) Ne = len(data1) * len(data2) / float(len(data1) + len(data2)) return D, kuiper_false_positive_probability(D, Ne) def fold_intervals(intervals): """Fold the weighted intervals to the interval (0,1). Convert a list of intervals (ai, bi, wi) to a list of non-overlapping intervals covering (0,1). Each output interval has a weight equal to the sum of the wis of all the intervals that include it. All intervals are interpreted modulo 1, and weights are accumulated counting multiplicity. This is appropriate, for example, if you have one or more blocks of observation and you want to determine how much observation time was spent on different parts of a system's orbit (the blocks should be converted to units of the orbital period first). Parameters ---------- intervals : list of (3,) tuple For each tuple (ai,bi,wi); ai and bi are the limits of the interval, and wi is the weight to apply to the interval. Returns ------- breaks : (N,) array of float The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and breaks[-1] = 1 weights : (N-1,) array of float The ith element is the sum of number of times the interval breaks[i],breaks[i+1] is included in each interval times the weight associated with that interval. """ r = [] breaks = set() tot = 0 for a, b, wt in intervals: tot += (np.ceil(b) - np.floor(a)) * wt fa = a % 1 breaks.add(fa) r.append((0, fa, -wt)) fb = b % 1 breaks.add(fb) r.append((fb, 1, -wt)) breaks.add(0.0) breaks.add(1.0) breaks = sorted(breaks) breaks_map = {f: i for (i, f) in enumerate(breaks)} totals = np.zeros(len(breaks) - 1) totals += tot for a, b, wt in r: totals[breaks_map[a] : breaks_map[b]] += wt return np.array(breaks), totals def cdf_from_intervals(breaks, totals): """Construct a callable piecewise-linear CDF from a pair of arrays. Take a pair of arrays in the format returned by fold_intervals and make a callable cumulative distribution function on the interval (0,1). Parameters ---------- breaks : (N,) array of float The boundaries of successive intervals. totals : (N-1,) array of float The weight for each interval. 
Returns ------- f : callable A cumulative distribution function corresponding to the piecewise-constant probability distribution given by breaks, weights """ if breaks[0] != 0 or breaks[-1] != 1: raise ValueError("Intervals must be restricted to [0,1]") if np.any(np.diff(breaks) <= 0): raise ValueError("Breaks must be strictly increasing") if np.any(totals < 0): raise ValueError("Total weights in each subinterval must be nonnegative") if np.all(totals == 0): raise ValueError("At least one interval must have positive exposure") b = breaks.copy() c = np.concatenate(((0,), np.cumsum(totals * np.diff(b)))) c /= c[-1] return lambda x: np.interp(x, b, c, 0, 1) def interval_overlap_length(i1, i2): """Compute the length of overlap of two intervals. Parameters ---------- i1, i2 : (float, float) The two intervals, (interval 1, interval 2). Returns ------- l : float The length of the overlap between the two intervals. """ (a, b) = i1 (c, d) = i2 if a < c: if b < c: return 0.0 elif b < d: return b - c else: return d - c elif a < d: if b < d: return b - a else: return d - a else: return 0 def histogram_intervals(n, breaks, totals): """Histogram of a piecewise-constant weight function. This function takes a piecewise-constant weight function and computes the average weight in each histogram bin. Parameters ---------- n : int The number of bins breaks : (N,) array of float Endpoints of the intervals in the PDF totals : (N-1,) array of float Probability densities in each bin Returns ------- h : array of float The average weight for each bin """ h = np.zeros(n) start = breaks[0] for i in range(len(totals)): end = breaks[i + 1] for j in range(n): ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end)) h[j] += ol / (1.0 / n) * totals[i] start = end return h
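# --- Hedged usage sketch (not part of the original module) -------------------
# The interval helpers above can be combined with the Kuiper test: fold a set
# of weighted observing blocks onto phase (0, 1) with fold_intervals, turn the
# folded exposure into a piecewise-linear CDF with cdf_from_intervals, and then
# ask whether a set of event phases is consistent with that exposure. This is a
# minimal sketch; it assumes the functions are importable from astropy.stats,
# as the module docstring states for everything in __all__.
import numpy as np
from astropy.stats import cdf_from_intervals, fold_intervals, kuiper

# Two observing blocks in units of the orbital period, each with weight 1.0.
blocks = [(0.0, 0.3, 1.0), (1.6, 2.1, 1.0)]
breaks, weights = fold_intervals(blocks)
exposure_cdf = cdf_from_intervals(breaks, weights)

# Phases drawn uniformly over (0, 1): the exposure above is *not* uniform
# (there is no coverage between phases 0.3 and 0.6), so the Kuiper test should
# report a very small false-positive probability, i.e. a significant mismatch.
rng = np.random.default_rng(0)
phases = rng.uniform(size=500)
d_stat, fpp = kuiper(phases, exposure_cdf)
print(f"Kuiper D = {d_stat:.3f}, false positive probability = {fpp:.3g}")
# ------------------------------------------------------------------------------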
baaa26faabe23962c02503ad83fd2b0239d7939d921bdc84e464268930258497
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple functions for model selection. """ import numpy as np __all__ = [ "bayesian_info_criterion", "bayesian_info_criterion_lsq", "akaike_info_criterion", "akaike_info_criterion_lsq", ] __doctest_requires__ = { "bayesian_info_criterion_lsq": ["scipy"], "akaike_info_criterion_lsq": ["scipy"], } def bayesian_info_criterion(log_likelihood, n_params, n_samples): r"""Computes the Bayesian Information Criterion (BIC) given the log of the likelihood function evaluated at the estimated (or analytically derived) parameters, the number of parameters, and the number of samples. The BIC is usually applied to decide whether increasing the number of free parameters (hence, increasing the model complexity) yields significantly better fittings. The decision is in favor of the model with the lowest BIC. BIC is given as .. math:: \mathrm{BIC} = k \ln(n) - 2L, in which :math:`n` is the sample size, :math:`k` is the number of free parameters, and :math:`L` is the log likelihood function of the model evaluated at the maximum likelihood estimate (i. e., the parameters for which L is maximized). When comparing two models define :math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which :math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is the lower BIC. The higher is :math:`\Delta \mathrm{BIC}` the stronger is the evidence against the model with higher BIC. The general rule of thumb is: :math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is better :math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is better :math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is better :math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is better For a detailed explanation, see [1]_ - [5]_. Parameters ---------- log_likelihood : float Logarithm of the likelihood function of the model evaluated at the point of maxima (with respect to the parameter space). n_params : int Number of free parameters of the model, i.e., dimension of the parameter space. n_samples : int Number of observations. Returns ------- bic : float Bayesian Information Criterion. Examples -------- The following example was originally presented in [1]_. Consider a Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta). In addition, assume that the t model has presented a higher likelihood. The question that the BIC is proposed to answer is: "Is the increase in likelihood due to larger number of parameters?" >>> from astropy.stats.info_theory import bayesian_info_criterion >>> lnL_g = -176.4 >>> lnL_t = -173.0 >>> n_params_g = 2 >>> n_params_t = 3 >>> n_samples = 100 >>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples) >>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples) >>> bic_g - bic_t # doctest: +FLOAT_CMP 2.1948298140119391 Therefore, there exist a moderate evidence that the increasing in likelihood for t-Student model is due to the larger number of parameters. References ---------- .. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian Information Criterion. <https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf> .. [2] Wikipedia. Bayesian Information Criterion. <https://en.wikipedia.org/wiki/Bayesian_information_criterion> .. [3] Origin Lab. Comparing Two Fitting Functions. <https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc> .. [4] Liddle, A. R. 
Information Criteria for Astrophysical Model Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf> .. [5] Liddle, A. R. How many cosmological parameters? 2008. <https://arxiv.org/pdf/astro-ph/0401198v3.pdf> """ return n_params * np.log(n_samples) - 2.0 * log_likelihood # NOTE: bic_t - bic_g doctest is skipped because it produced slightly # different result in arm64 and big-endian s390x CI jobs. def bayesian_info_criterion_lsq(ssr, n_params, n_samples): r""" Computes the Bayesian Information Criterion (BIC) assuming that the observations come from a Gaussian distribution. In this case, BIC is given as .. math:: \mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n) in which :math:`n` is the sample size, :math:`k` is the number of free parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals between model and data. This is applicable, for instance, when the parameters of a model are estimated using the least squares statistic. See [1]_ and [2]_. Parameters ---------- ssr : float Sum of squared residuals (SSR) between model and data. n_params : int Number of free parameters of the model, i.e., dimension of the parameter space. n_samples : int Number of observations. Returns ------- bic : float Examples -------- Consider the simple 1-D fitting example presented in the Astropy modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to a source flux using the least squares statistic. However, the fittings themselves do not tell much about which model better represents this hypothetical source. Therefore, we are going to apply to BIC in order to decide in favor of a model. >>> import numpy as np >>> from astropy.modeling import models, fitting >>> from astropy.stats.info_theory import bayesian_info_criterion_lsq >>> # Generate fake data >>> np.random.seed(0) >>> x = np.linspace(-5., 5., 200) >>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2) >>> y += np.random.normal(0., 0.2, x.shape) >>> # Fit the data using a Box model. >>> # Bounds are not really needed but included here to demonstrate usage. >>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5, ... bounds={"x_0": (-5., 5.)}) >>> fit_t = fitting.LevMarLSQFitter() >>> t = fit_t(t_init, x, y) >>> # Fit the data using a Gaussian >>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.) >>> fit_g = fitting.LevMarLSQFitter() >>> g = fit_g(g_init, x, y) >>> # Compute the mean squared errors >>> ssr_t = np.sum((t(x) - y)*(t(x) - y)) >>> ssr_g = np.sum((g(x) - y)*(g(x) - y)) >>> # Compute the bics >>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0]) >>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0]) >>> bic_t - bic_g # doctest: +SKIP 30.644474706065466 Hence, there is a very strong evidence that the Gaussian model has a significantly better representation of the data than the Box model. This is, obviously, expected since the true model is Gaussian. References ---------- .. [1] Wikipedia. Bayesian Information Criterion. <https://en.wikipedia.org/wiki/Bayesian_information_criterion> .. [2] Origin Lab. Comparing Two Fitting Functions. <https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc> .. [3] Astropy Models and Fitting <https://docs.astropy.org/en/stable/modeling> """ return bayesian_info_criterion( -0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples ) def akaike_info_criterion(log_likelihood, n_params, n_samples): r""" Computes the Akaike Information Criterion (AIC). 
Like the Bayesian Information Criterion, the AIC is a measure of relative fitting quality which is used for fitting evaluation and model selection. The decision is in favor of the model with the lowest AIC. AIC is given as .. math:: \mathrm{AIC} = 2(k - L) in which :math:`n` is the sample size, :math:`k` is the number of free parameters, and :math:`L` is the log likelihood function of the model evaluated at the maximum likelihood estimate (i. e., the parameters for which L is maximized). In case that the sample size is not "large enough" a correction is applied, i.e. .. math:: \mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1} Rule of thumb [1]_: :math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}` :math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i :math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i :math:`\Delta\mathrm{AIC}_i > 10`: essentially none support for model i in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the models which are being compared. For detailed explanations see [1]_-[6]_. Parameters ---------- log_likelihood : float Logarithm of the likelihood function of the model evaluated at the point of maxima (with respect to the parameter space). n_params : int Number of free parameters of the model, i.e., dimension of the parameter space. n_samples : int Number of observations. Returns ------- aic : float Akaike Information Criterion. Examples -------- The following example was originally presented in [2]_. Basically, two models are being compared. One with six parameters (model 1) and another with five parameters (model 2). Despite of the fact that model 2 has a lower AIC, we could decide in favor of model 1 since the difference (in AIC) between them is only about 1.0. >>> n_samples = 121 >>> lnL1 = -3.54 >>> n1_params = 6 >>> lnL2 = -4.17 >>> n2_params = 5 >>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples) >>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples) >>> aic1 - aic2 # doctest: +FLOAT_CMP 0.9551029748283746 Therefore, we can strongly support the model 1 with the advantage that it has more free parameters. References ---------- .. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike Information Criterion. <http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf> .. [2] Mazerolle, M. J. Making sense out of Akaike's Information Criterion (AIC): its use and interpretation in model selection and inference from ecological data. .. [3] Wikipedia. Akaike Information Criterion. <https://en.wikipedia.org/wiki/Akaike_information_criterion> .. [4] Origin Lab. Comparing Two Fitting Functions. <https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc> .. [5] Liddle, A. R. Information Criteria for Astrophysical Model Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf> .. [6] Liddle, A. R. How many cosmological parameters? 2008. <https://arxiv.org/pdf/astro-ph/0401198v3.pdf> """ # Correction in case of small number of observations if n_samples / float(n_params) >= 40.0: aic = 2.0 * (n_params - log_likelihood) else: aic = 2.0 * (n_params - log_likelihood) + 2.0 * n_params * (n_params + 1.0) / ( n_samples - n_params - 1.0 ) return aic def akaike_info_criterion_lsq(ssr, n_params, n_samples): r""" Computes the Akaike Information Criterion assuming that the observations are Gaussian distributed. In this case, AIC is given as .. 
math:: \mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k If the sample size is not "large enough", a correction is applied, i.e. .. math:: \mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k + \dfrac{2k(k+1)}{n-k-1} in which :math:`n` is the sample size, :math:`k` is the number of free parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals between model and data. This is applicable, for instance, when the parameters of a model are estimated using the least squares statistic. Parameters ---------- ssr : float Sum of squared residuals (SSR) between model and data. n_params : int Number of free parameters of the model, i.e., the dimension of the parameter space. n_samples : int Number of observations. Returns ------- aic : float Akaike Information Criterion. Examples -------- This example is based on the Astropy Modeling webpage, Compound models section. >>> import numpy as np >>> from astropy.modeling import models, fitting >>> from astropy.stats.info_theory import akaike_info_criterion_lsq >>> np.random.seed(42) >>> # Generate fake data >>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level >>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian >>> g3 = models.Gaussian1D(2.5, 0.5, 0.1) >>> x = np.linspace(-1, 1, 200) >>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape) >>> # Fit with three Gaussians >>> g3_init = (models.Gaussian1D(.1, 0, 0.1) ... + models.Gaussian1D(.1, 0.2, 0.15) ... + models.Gaussian1D(2.4, .4, 0.1)) >>> fitter = fitting.LevMarLSQFitter() >>> g3_fit = fitter(g3_init, x, y) >>> # Fit with two Gaussians >>> g2_init = (models.Gaussian1D(.1, 0, 0.1) + ... models.Gaussian1D(2, 0.5, 0.1)) >>> g2_fit = fitter(g2_init, x, y) >>> # Fit with only one Gaussian >>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5) >>> g1_fit = fitter(g1_init, x, y) >>> # Compute the sums of squared residuals >>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0) >>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0) >>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0) >>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP -634.5257517810961 >>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP -662.83834510232043 >>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP -647.47312032659499 Hence, from the AIC values, we would prefer to choose the model g2_fit. The difference in AIC between g2_fit and g3_fit is about 28, so there is essentially no support for the extra components in g3_fit, and with a difference of about 15 we should also reject the model g1_fit. References ---------- .. [1] Akaike Information Criterion. <https://en.wikipedia.org/wiki/Akaike_information_criterion> .. [2] Origin Lab. Comparing Two Fitting Functions. <https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc> """ return akaike_info_criterion( -0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples )
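# --- Illustrative usage (not part of the astropy source above) ---
# A minimal sketch, assuming only the two least-squares helpers defined in this
# module, showing how AIC and BIC penalize extra parameters for the same data.
# The SSR values and parameter counts below are made-up numbers chosen purely
# to illustrate the call signatures.
from astropy.stats.info_theory import (akaike_info_criterion_lsq,
                                       bayesian_info_criterion_lsq)

n_samples = 200
ssr_simple, k_simple = 8.4, 3     # hypothetical 3-parameter model
ssr_complex, k_complex = 8.1, 9   # hypothetical 9-parameter model

# Positive differences mean the extra parameters are not worth the small
# reduction in SSR under the given criterion.
d_aic = (akaike_info_criterion_lsq(ssr_complex, k_complex, n_samples)
         - akaike_info_criterion_lsq(ssr_simple, k_simple, n_samples))
d_bic = (bayesian_info_criterion_lsq(ssr_complex, k_complex, n_samples)
         - bayesian_info_criterion_lsq(ssr_simple, k_simple, n_samples))
print(d_aic, d_bic)  # BIC penalizes the extra parameters more strongly here (ln(200) > 2)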
1352c7995d537c60b6f956cc0c4117e90836b58d25d4f8950a8c845a3cac5c1e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions for computing robust statistics using Tukey's biweight function. """ import numpy as np from .funcs import median_absolute_deviation __all__ = [ "biweight_location", "biweight_scale", "biweight_midvariance", "biweight_midcovariance", "biweight_midcorrelation", ] def _stat_functions(data, ignore_nan=False): if isinstance(data, np.ma.MaskedArray): median_func = np.ma.median sum_func = np.ma.sum elif ignore_nan: median_func = np.nanmedian sum_func = np.nansum else: median_func = np.median sum_func = np.sum return median_func, sum_func def biweight_location(data, c=6.0, M=None, axis=None, *, ignore_nan=False): r""" Compute the biweight location. The biweight location is a robust statistic for determining the central location of a distribution. It is given by: .. math:: \zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2} {\sum_{|u_i|<1} \ (1 - u_i^2)^2} where :math:`x` is the input data, :math:`M` is the sample median (or the input initial location guess) and :math:`u_i` is given by: .. math:: u_{i} = \frac{(x_i - M)}{c * MAD} where :math:`c` is the tuning constant and :math:`MAD` is the `median absolute deviation <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The biweight location tuning constant ``c`` is typically 6.0 (the default). If :math:`MAD` is zero, then the median will be returned. Parameters ---------- data : array-like Input array or object that can be converted to an array. ``data`` can be a `~numpy.ma.MaskedArray`. c : float, optional Tuning constant for the biweight estimator (default = 6.0). M : float or array-like, optional Initial guess for the location. If ``M`` is a scalar value, then its value will be used for the entire array (or along each ``axis``, if specified). If ``M`` is an array, then its must be an array containing the initial location estimate along each ``axis`` of the input array. If `None` (default), then the median of the input array will be used (or along each ``axis``, if specified). axis : None, int, or tuple of int, optional The axis or axes along which the biweight locations are computed. If `None` (default), then the biweight location of the flattened input array will be computed. ignore_nan : bool, optional Whether to ignore NaN values in the input ``data``. Returns ------- biweight_location : float or `~numpy.ndarray` The biweight location of the input data. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. See Also -------- biweight_scale, biweight_midvariance, biweight_midcovariance References ---------- .. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B) .. 
[2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm Examples -------- Generate random variates from a Gaussian distribution and return the biweight location of the distribution: >>> import numpy as np >>> from astropy.stats import biweight_location >>> rand = np.random.default_rng(12345) >>> biloc = biweight_location(rand.standard_normal(1000)) >>> print(biloc) # doctest: +FLOAT_CMP 0.01535330525461019 """ median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan) if isinstance(data, np.ma.MaskedArray) and ignore_nan: data = np.ma.masked_where(np.isnan(data), data, copy=True) data = np.asanyarray(data).astype(np.float64) if M is None: M = median_func(data, axis=axis) if axis is not None: M = np.expand_dims(M, axis=axis) # set up the differences d = data - M # set up the weighting mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan) # mad = 0 means data is constant or mostly constant # mad = np.nan means data contains NaNs and ignore_nan=False if axis is None and (mad == 0.0 or np.isnan(mad)): return M if axis is not None: mad = np.expand_dims(mad, axis=axis) with np.errstate(divide="ignore", invalid="ignore"): u = d / (c * mad) # now remove the outlier points # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid="ignore"): mask = np.abs(u) >= 1 u = (1 - u**2) ** 2 u[mask] = 0 # If mad == 0 along the specified ``axis`` in the input data, return # the median value along that axis. # Ignore RuntimeWarnings for divide by zero with np.errstate(divide="ignore", invalid="ignore"): value = M.squeeze() + (sum_func(d * u, axis=axis) / sum_func(u, axis=axis)) if np.isscalar(value): return value where_func = np.where if isinstance(data, np.ma.MaskedArray): where_func = np.ma.where # return MaskedArray return where_func(mad.squeeze() == 0, M.squeeze(), value) def biweight_scale( data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False ): r""" Compute the biweight scale. The biweight scale is a robust statistic for determining the standard deviation of a distribution. It is the square root of the `biweight midvariance <https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_. It is given by: .. math:: \zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\sum_{|u_i| < 1} \ (x_i - M)^2 (1 - u_i^2)^4}} {|(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))|} where :math:`x` is the input data, :math:`M` is the sample median (or the input location) and :math:`u_i` is given by: .. math:: u_{i} = \frac{(x_i - M)}{c * MAD} where :math:`c` is the tuning constant and :math:`MAD` is the `median absolute deviation <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The biweight midvariance tuning constant ``c`` is typically 9.0 (the default). If :math:`MAD` is zero, then zero will be returned. For the standard definition of biweight scale, :math:`n` is the total number of points in the array (or along the input ``axis``, if specified). That definition is used if ``modify_sample_size`` is `False`, which is the default. However, if ``modify_sample_size = True``, then :math:`n` is the number of points for which :math:`|u_i| < 1` (i.e. the total number of non-rejected values), i.e. .. math:: n = \sum_{|u_i| < 1} \ 1 which results in a value closer to the true standard deviation for small sample sizes or for a large number of rejected values. Parameters ---------- data : array-like Input array or object that can be converted to an array. ``data`` can be a `~numpy.ma.MaskedArray`. 
c : float, optional Tuning constant for the biweight estimator (default = 9.0). M : float or array-like, optional The location estimate. If ``M`` is a scalar value, then its value will be used for the entire array (or along each ``axis``, if specified). If ``M`` is an array, then its must be an array containing the location estimate along each ``axis`` of the input array. If `None` (default), then the median of the input array will be used (or along each ``axis``, if specified). axis : None, int, or tuple of int, optional The axis or axes along which the biweight scales are computed. If `None` (default), then the biweight scale of the flattened input array will be computed. modify_sample_size : bool, optional If `False` (default), then the sample size used is the total number of elements in the array (or along the input ``axis``, if specified), which follows the standard definition of biweight scale. If `True`, then the sample size is reduced to correct for any rejected values (i.e. the sample size used includes only the non-rejected values), which results in a value closer to the true standard deviation for small sample sizes or for a large number of rejected values. ignore_nan : bool, optional Whether to ignore NaN values in the input ``data``. Returns ------- biweight_scale : float or `~numpy.ndarray` The biweight scale of the input data. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. See Also -------- biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation References ---------- .. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B) .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm Examples -------- Generate random variates from a Gaussian distribution and return the biweight scale of the distribution: >>> import numpy as np >>> from astropy.stats import biweight_scale >>> rand = np.random.default_rng(12345) >>> biscl = biweight_scale(rand.standard_normal(1000)) >>> print(biscl) # doctest: +FLOAT_CMP 1.0239311812635818 """ return np.sqrt( biweight_midvariance( data, c=c, M=M, axis=axis, modify_sample_size=modify_sample_size, ignore_nan=ignore_nan, ) ) def biweight_midvariance( data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False ): r""" Compute the biweight midvariance. The biweight midvariance is a robust statistic for determining the variance of a distribution. Its square root is a robust estimator of scale (i.e. standard deviation). It is given by: .. math:: \zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \ (x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))^2} where :math:`x` is the input data, :math:`M` is the sample median (or the input location) and :math:`u_i` is given by: .. math:: u_{i} = \frac{(x_i - M)}{c * MAD} where :math:`c` is the tuning constant and :math:`MAD` is the `median absolute deviation <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The biweight midvariance tuning constant ``c`` is typically 9.0 (the default). If :math:`MAD` is zero, then zero will be returned. For the standard definition of `biweight midvariance <https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_, :math:`n` is the total number of points in the array (or along the input ``axis``, if specified). That definition is used if ``modify_sample_size`` is `False`, which is the default. 
However, if ``modify_sample_size = True``, then :math:`n` is the number of points for which :math:`|u_i| < 1` (i.e. the total number of non-rejected values), i.e. .. math:: n = \sum_{|u_i| < 1} \ 1 which results in a value closer to the true variance for small sample sizes or for a large number of rejected values. Parameters ---------- data : array-like Input array or object that can be converted to an array. ``data`` can be a `~numpy.ma.MaskedArray`. c : float, optional Tuning constant for the biweight estimator (default = 9.0). M : float or array-like, optional The location estimate. If ``M`` is a scalar value, then its value will be used for the entire array (or along each ``axis``, if specified). If ``M`` is an array, then its must be an array containing the location estimate along each ``axis`` of the input array. If `None` (default), then the median of the input array will be used (or along each ``axis``, if specified). axis : None, int, or tuple of int, optional The axis or axes along which the biweight midvariances are computed. If `None` (default), then the biweight midvariance of the flattened input array will be computed. modify_sample_size : bool, optional If `False` (default), then the sample size used is the total number of elements in the array (or along the input ``axis``, if specified), which follows the standard definition of biweight midvariance. If `True`, then the sample size is reduced to correct for any rejected values (i.e. the sample size used includes only the non-rejected values), which results in a value closer to the true variance for small sample sizes or for a large number of rejected values. ignore_nan : bool, optional Whether to ignore NaN values in the input ``data``. Returns ------- biweight_midvariance : float or `~numpy.ndarray` The biweight midvariance of the input data. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. See Also -------- biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation References ---------- .. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance .. 
[2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B) Examples -------- Generate random variates from a Gaussian distribution and return the biweight midvariance of the distribution: >>> import numpy as np >>> from astropy.stats import biweight_midvariance >>> rand = np.random.default_rng(12345) >>> bivar = biweight_midvariance(rand.standard_normal(1000)) >>> print(bivar) # doctest: +FLOAT_CMP 1.0484350639638342 """ median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan) if isinstance(data, np.ma.MaskedArray) and ignore_nan: data = np.ma.masked_where(np.isnan(data), data, copy=True) data = np.asanyarray(data).astype(np.float64) if M is None: M = median_func(data, axis=axis) if axis is not None: M = np.expand_dims(M, axis=axis) # set up the differences d = data - M # set up the weighting mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan) if axis is None: # data is constant or mostly constant OR # data contains NaNs and ignore_nan=False if mad == 0.0 or np.isnan(mad): return mad**2 # variance units else: mad = np.expand_dims(mad, axis=axis) with np.errstate(divide="ignore", invalid="ignore"): u = d / (c * mad) # now remove the outlier points # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid="ignore"): mask = np.abs(u) < 1 if isinstance(mask, np.ma.MaskedArray): mask = mask.filled(fill_value=False) # exclude masked data values u = u**2 if modify_sample_size: n = sum_func(mask, axis=axis) else: # set good values to 1, bad values to 0 include_mask = np.ones(data.shape) if isinstance(data, np.ma.MaskedArray): include_mask[data.mask] = 0 if ignore_nan: include_mask[np.isnan(data)] = 0 n = np.sum(include_mask, axis=axis) f1 = d * d * (1.0 - u) ** 4 f1[~mask] = 0.0 f1 = sum_func(f1, axis=axis) f2 = (1.0 - u) * (1.0 - 5.0 * u) f2[~mask] = 0.0 f2 = np.abs(np.sum(f2, axis=axis)) ** 2 # If mad == 0 along the specified ``axis`` in the input data, return # 0.0 along that axis. # Ignore RuntimeWarnings for divide by zero. with np.errstate(divide="ignore", invalid="ignore"): value = n * f1 / f2 if np.isscalar(value): return value where_func = np.where if isinstance(data, np.ma.MaskedArray): where_func = np.ma.where # return MaskedArray return where_func(mad.squeeze() == 0, 0.0, value) def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False): r""" Compute the biweight midcovariance between pairs of multiple variables. The biweight midcovariance is a robust and resistant estimator of the covariance between two variables. This function computes the biweight midcovariance between all pairs of the input variables (rows) in the input data. The output array will have a shape of (N_variables, N_variables). The diagonal elements will be the biweight midvariances of each input variable (see :func:`biweight_midvariance`). The off-diagonal elements will be the biweight midcovariances between each pair of input variables. For example, if the input array ``data`` contains three variables (rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray` midcovariance matrix will be: .. math:: \begin{pmatrix} \zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\ \zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\ \zeta_{zx} & \zeta_{zy} & \zeta_{zz} \end{pmatrix} where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}` are the biweight midvariances of each variable. The biweight midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}` (:math:`= \zeta_{yx}`). 
The biweight midcovariance between :math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`= \zeta_{zx}`). The biweight midcovariance between :math:`y` and :math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`). The biweight midcovariance between two variables :math:`x` and :math:`y` is given by: .. math:: \zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \ (x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2} {(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2)) (\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))} where :math:`M_x` and :math:`M_y` are the medians (or the input locations) of the two variables and :math:`u_i` and :math:`v_i` are given by: .. math:: u_{i} = \frac{(x_i - M_x)}{c * MAD_x} v_{i} = \frac{(y_i - M_y)}{c * MAD_y} where :math:`c` is the biweight tuning constant and :math:`MAD_x` and :math:`MAD_y` are the `median absolute deviation <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the :math:`x` and :math:`y` variables. The biweight midvariance tuning constant ``c`` is typically 9.0 (the default). If :math:`MAD_x` or :math:`MAD_y` are zero, then zero will be returned for that element. For the standard definition of biweight midcovariance, :math:`n_{xy}` is the total number of observations of each variable. That definition is used if ``modify_sample_size`` is `False`, which is the default. However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i| < 1`, i.e. .. math:: n_{xx} = \sum_{|u_i| < 1} \ 1 .. math:: n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1 .. math:: n_{yy} = \sum_{|v_i| < 1} \ 1 which results in a value closer to the true variance for small sample sizes or for a large number of rejected values. Parameters ---------- data : 2D or 1D array-like Input data either as a 2D or 1D array. For a 2D array, it should have a shape (N_variables, N_observations). A 1D array may be input for observations of a single variable, in which case the biweight midvariance will be calculated (no covariance). Each row of ``data`` represents a variable, and each column a single observation of all those variables (same as the `numpy.cov` convention). c : float, optional Tuning constant for the biweight estimator (default = 9.0). M : float or 1D array-like, optional The location estimate of each variable, either as a scalar or array. If ``M`` is an array, then its must be a 1D array containing the location estimate of each row (i.e. ``a.ndim`` elements). If ``M`` is a scalar value, then its value will be used for each variable (row). If `None` (default), then the median of each variable (row) will be used. modify_sample_size : bool, optional If `False` (default), then the sample size used is the total number of observations of each variable, which follows the standard definition of biweight midcovariance. If `True`, then the sample size is reduced to correct for any rejected values (see formula above), which results in a value closer to the true covariance for small sample sizes or for a large number of rejected values. Returns ------- biweight_midcovariance : ndarray A 2D array representing the biweight midcovariances between each pair of the variables (rows) in the input array. The output array will have a shape of (N_variables, N_variables). The diagonal elements will be the biweight midvariances of each input variable. The off-diagonal elements will be the biweight midcovariances between each pair of input variables. 
See Also -------- biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location References ---------- .. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm Examples -------- Compute the biweight midcovariance between two random variables: >>> import numpy as np >>> from astropy.stats import biweight_midcovariance >>> # Generate two random variables x and y >>> rng = np.random.default_rng(1) >>> x = rng.normal(0, 1, 200) >>> y = rng.normal(0, 3, 200) >>> # Introduce an obvious outlier >>> x[0] = 30.0 >>> # Calculate the biweight midcovariances between x and y >>> bicov = biweight_midcovariance([x, y]) >>> print(bicov) # doctest: +FLOAT_CMP [[0.83435568 0.02379316] [0.02379316 7.15665769]] >>> # Print standard deviation estimates >>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP [0.91343072 2.67519302] """ data = np.asanyarray(data).astype(np.float64) # ensure data is 2D if data.ndim == 1: data = data[np.newaxis, :] if data.ndim != 2: raise ValueError("The input array must be 2D or 1D.") # estimate location if not given if M is None: M = np.median(data, axis=1) M = np.asanyarray(M) if M.ndim > 1: raise ValueError("M must be a scalar or 1D array.") # set up the differences d = (data.T - M).T # set up the weighting mad = median_absolute_deviation(data, axis=1) with np.errstate(divide="ignore", invalid="ignore"): u = (d.T / (c * mad)).T # now remove the outlier points # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid="ignore"): mask = np.abs(u) < 1 u = u**2 if modify_sample_size: maskf = mask.astype(float) n = np.inner(maskf, maskf) else: n = data[0].size usub1 = 1.0 - u usub5 = 1.0 - 5.0 * u usub1[~mask] = 0.0 with np.errstate(divide="ignore", invalid="ignore"): numerator = d * usub1**2 denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis] numerator_matrix = np.dot(numerator, numerator.T) denominator_matrix = np.dot(denominator, denominator.T) value = n * (numerator_matrix / denominator_matrix) idx = np.where(mad == 0)[0] value[idx, :] = 0 value[:, idx] = 0 return value def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False): r""" Compute the biweight midcorrelation between two variables. The `biweight midcorrelation <https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a measure of similarity between samples. It is given by: .. math:: r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}} where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`, :math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and :math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and :math:`y`. Parameters ---------- x, y : 1D array-like Input arrays for the two variables. ``x`` and ``y`` must be 1D arrays and have the same number of elements. c : float, optional Tuning constant for the biweight estimator (default = 9.0). See `biweight_midcovariance` for more details. M : float or array-like, optional The location estimate. If ``M`` is a scalar value, then its value will be used for the entire array (or along each ``axis``, if specified). If ``M`` is an array, then its must be an array containing the location estimate along each ``axis`` of the input array. If `None` (default), then the median of the input array will be used (or along each ``axis``, if specified). See `biweight_midcovariance` for more details. 
modify_sample_size : bool, optional If `False` (default), then the sample size used is the total number of elements in the array (or along the input ``axis``, if specified), which follows the standard definition of biweight midcovariance. If `True`, then the sample size is reduced to correct for any rejected values (i.e. the sample size used includes only the non-rejected values), which results in a value closer to the true midcovariance for small sample sizes or for a large number of rejected values. See `biweight_midcovariance` for more details. Returns ------- biweight_midcorrelation : float The biweight midcorrelation between ``x`` and ``y``. See Also -------- biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location References ---------- .. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation Examples -------- Calculate the biweight midcorrelation between two variables: >>> import numpy as np >>> from astropy.stats import biweight_midcorrelation >>> rng = np.random.default_rng(12345) >>> x = rng.normal(0, 1, 200) >>> y = rng.normal(0, 3, 200) >>> # Introduce an obvious outlier >>> x[0] = 30.0 >>> bicorr = biweight_midcorrelation(x, y) >>> print(bicorr) # doctest: +FLOAT_CMP -0.09203238319481295 """ x = np.asanyarray(x) y = np.asanyarray(y) if x.ndim != 1: raise ValueError("x must be a 1D array.") if y.ndim != 1: raise ValueError("y must be a 1D array.") if x.shape != y.shape: raise ValueError("x and y must have the same shape.") bicorr = biweight_midcovariance( [x, y], c=c, M=M, modify_sample_size=modify_sample_size ) return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1]))
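# --- Illustrative usage (not part of the astropy source above) ---
# A minimal sketch, assuming the public astropy.stats imports, contrasting the
# robust biweight estimators defined above with the ordinary mean and standard
# deviation on synthetic data containing one gross outlier.
import numpy as np

from astropy.stats import biweight_location, biweight_scale

rng = np.random.default_rng(0)
values = rng.normal(10.0, 2.0, 500)
values[0] = 1.0e4  # a single wild outlier

# The classical estimates are dragged far from 10 and 2 by the outlier ...
print(np.mean(values), np.std(values))
# ... while the biweight estimates remain close to the true location and scale.
print(biweight_location(values), biweight_scale(values))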
5922da9f9c120eb66c07607f51acd5cfa90d0d53f3af44e8360556f9551ca422
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Methods for selecting the bin width of histograms Ported from the astroML project: https://www.astroml.org/ """ import numpy as np from .bayesian_blocks import bayesian_blocks __all__ = [ "histogram", "scott_bin_width", "freedman_bin_width", "knuth_bin_width", "calculate_bin_edges", ] def calculate_bin_edges(a, bins=10, range=None, weights=None): """ Calculate histogram bin edges like ``numpy.histogram_bin_edges``. Parameters ---------- a : array-like Input data. The bin edges are calculated over the flattened array. bins : int, list, or str, optional If ``bins`` is an int, it is the number of bins. If it is a list it is taken to be the bin edges. If it is a string, it must be one of 'blocks', 'knuth', 'scott' or 'freedman'. See `~astropy.stats.histogram` for a description of each method. range : tuple or None, optional The minimum and maximum range for the histogram. If not specified, it will be (a.min(), a.max()). However, if bins is a list it is returned unmodified regardless of the range argument. weights : array-like, optional An array the same shape as ``a``. If given, the histogram accumulates the value of the weight corresponding to ``a`` instead of returning the count of values. This argument does not affect determination of bin edges, though they may be used in the future as new methods are added. """ # if range is specified, we need to truncate the data for # the bin-finding routines if range is not None: a = a[(a >= range[0]) & (a <= range[1])] # if bins is a string, first compute bin edges with the desired heuristic if isinstance(bins, str): a = np.asarray(a).ravel() # TODO: if weights is specified, we need to modify things. # e.g. we could use point measures fitness for Bayesian blocks if weights is not None: raise NotImplementedError( "weights are not yet supported for the enhanced histogram" ) if bins == "blocks": bins = bayesian_blocks(a) elif bins == "knuth": da, bins = knuth_bin_width(a, True) elif bins == "scott": da, bins = scott_bin_width(a, True) elif bins == "freedman": da, bins = freedman_bin_width(a, True) else: raise ValueError(f"unrecognized bin code: '{bins}'") if range: # Check that the upper and lower edges are what was requested. # The current implementation of the bin width estimators does not # guarantee this, it only ensures that data outside the range is # excluded from calculation of the bin widths. if bins[0] != range[0]: bins[0] = range[0] if bins[-1] != range[1]: bins[-1] = range[1] elif np.ndim(bins) == 0: # Number of bins was given bins = np.histogram_bin_edges(a, bins, range=range, weights=weights) return bins def histogram(a, bins=10, range=None, weights=None, **kwargs): """Enhanced histogram function, providing adaptive binnings This is a histogram function that enables the use of more sophisticated algorithms for determining bins. Aside from the ``bins`` argument allowing a string specified how bins are computed, the parameters are the same as ``numpy.histogram()``. Parameters ---------- a : array-like array of data to be histogrammed bins : int, list, or str, optional If bins is a string, then it must be one of: - 'blocks' : use bayesian blocks for dynamic bin widths - 'knuth' : use Knuth's rule to determine bins - 'scott' : use Scott's rule to determine bins - 'freedman' : use the Freedman-Diaconis rule to determine bins range : tuple or None, optional the minimum and maximum range for the histogram. 
If not specified, it will be (x.min(), x.max()) weights : array-like, optional An array the same shape as ``a``. If given, the histogram accumulates the value of the weight corresponding to ``a`` instead of returning the count of values. This argument does not affect determination of bin edges. other keyword arguments are described in numpy.histogram(). Returns ------- hist : array The values of the histogram. See ``density`` and ``weights`` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- numpy.histogram """ bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights) # Now we call numpy's histogram with the resulting bin edges return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs) def scott_bin_width(data, return_bins=False): r"""Return the optimal histogram bin width using Scott's rule Scott's rule is a normal reference rule: it minimizes the integrated mean squared error in the bin approximation under the assumption that the data is approximately Gaussian. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges Returns ------- width : float optimal bin width using Scott's rule bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal bin width is .. math:: \Delta_b = \frac{3.5\sigma}{n^{1/3}} where :math:`\sigma` is the standard deviation of the data, and :math:`n` is the number of data points [1]_. References ---------- .. [1] Scott, David W. (1979). "On optimal and data-based histograms". Biometricka 66 (3): 605-610 See Also -------- knuth_bin_width freedman_bin_width bayesian_blocks histogram """ data = np.asarray(data) if data.ndim != 1: raise ValueError("data should be one-dimensional") n = data.size sigma = np.std(data) dx = 3.5 * sigma / (n ** (1 / 3)) if return_bins: Nbins = np.ceil((data.max() - data.min()) / dx) Nbins = max(1, Nbins) bins = data.min() + dx * np.arange(Nbins + 1) return dx, bins else: return dx def freedman_bin_width(data, return_bins=False): r"""Return the optimal histogram bin width using the Freedman-Diaconis rule The Freedman-Diaconis rule is a normal reference rule like Scott's rule, but uses rank-based statistics for results which are more robust to deviations from a normal distribution. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges Returns ------- width : float optimal bin width using the Freedman-Diaconis rule bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal bin width is .. math:: \Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}} where :math:`q_{N}` is the :math:`N` percent quartile of the data, and :math:`n` is the number of data points [1]_. References ---------- .. [1] D. Freedman & P. Diaconis (1981) "On the histogram as a density estimator: L2 theory". 
Probability Theory and Related Fields 57 (4): 453-476 See Also -------- knuth_bin_width scott_bin_width bayesian_blocks histogram """ data = np.asarray(data) if data.ndim != 1: raise ValueError("data should be one-dimensional") n = data.size if n < 4: raise ValueError("data should have more than three entries") v25, v75 = np.percentile(data, [25, 75]) dx = 2 * (v75 - v25) / (n ** (1 / 3)) if return_bins: dmin, dmax = data.min(), data.max() Nbins = max(1, np.ceil((dmax - dmin) / dx)) try: bins = dmin + dx * np.arange(Nbins + 1) except ValueError as e: if "Maximum allowed size exceeded" in str(e): raise ValueError( "The inter-quartile range of the data is too small: " f"failed to construct histogram with {Nbins + 1} bins. " "Please use another bin method, such as " 'bins="scott"' ) else: # Something else # pragma: no cover raise return dx, bins else: return dx def knuth_bin_width(data, return_bins=False, quiet=True): r"""Return the optimal histogram bin width using Knuth's rule. Knuth's rule is a fixed-width, Bayesian approach to determining the optimal bin width of a histogram. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges quiet : bool, optional if True (default) then suppress stdout output from scipy.optimize Returns ------- dx : float optimal bin width. Bins are measured starting at the first data point. bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal number of bins is the value M which maximizes the function .. math:: F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2}) - M\log\Gamma(\frac{1}{2}) - \log\Gamma(\frac{2n+M}{2}) + \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2}) where :math:`\Gamma` is the Gamma function, :math:`n` is the number of data points, :math:`n_k` is the number of measurements in bin :math:`k` [1]_. References ---------- .. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms". arXiv:0605197, 2006 See Also -------- freedman_bin_width scott_bin_width bayesian_blocks histogram """ # import here because of optional scipy dependency from scipy import optimize knuthF = _KnuthF(data) dx0, bins0 = freedman_bin_width(data, True) M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0] bins = knuthF.bins(M) dx = bins[1] - bins[0] if return_bins: return dx, bins else: return dx class _KnuthF: r"""Class which implements the function minimized by knuth_bin_width Parameters ---------- data : array-like, one dimension data to be histogrammed Notes ----- the function F is given by .. math:: F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2}) - M\log\Gamma(\frac{1}{2}) - \log\Gamma(\frac{2n+M}{2}) + \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2}) where :math:`\Gamma` is the Gamma function, :math:`n` is the number of data points, :math:`n_k` is the number of measurements in bin :math:`k`. See Also -------- knuth_bin_width """ def __init__(self, data): self.data = np.array(data, copy=True) if self.data.ndim != 1: raise ValueError("data should be 1-dimensional") self.data.sort() self.n = self.data.size # import here rather than globally: scipy is an optional dependency. # Note that scipy is imported in the function which calls this, # so there shouldn't be any issue importing here. 
from scipy import special # create a reference to gammaln to use in self.eval() self.gammaln = special.gammaln def bins(self, M): """Return the bin edges given M number of bins""" return np.linspace(self.data[0], self.data[-1], int(M) + 1) def __call__(self, M): return self.eval(M) def eval(self, M): """Evaluate the Knuth function Parameters ---------- M : int Number of bins Returns ------- F : float evaluation of the negative Knuth loglikelihood function: smaller values indicate a better fit. """ M = int(M) if M <= 0: return np.inf bins = self.bins(M) nk, bins = np.histogram(self.data, bins) return -( self.n * np.log(M) + self.gammaln(0.5 * M) - M * self.gammaln(0.5) - self.gammaln(self.n + 0.5 * M) + np.sum(self.gammaln(nk + 0.5)) )
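# --- Illustrative usage (not part of the astropy source above) ---
# A minimal sketch comparing the bin-selection rules implemented above on one
# synthetic bimodal sample. The 'knuth' and 'blocks' rules require scipy; the
# data are invented purely for illustration.
import numpy as np

from astropy.stats import histogram

rng = np.random.default_rng(1)
sample = np.concatenate([rng.normal(-2.0, 0.5, 300), rng.normal(3.0, 1.0, 700)])

for rule in ("scott", "freedman", "knuth", "blocks"):
    counts, edges = histogram(sample, bins=rule)
    # Each rule trades smoothness against resolution differently, so the
    # number of bins it chooses differs.
    print(rule, len(edges) - 1)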
433cbaa59bc1495290f8de0f371f101083ef3029bc44d9150e13971c14f94e3e
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np __all__ = ["jackknife_resampling", "jackknife_stats"] __doctest_requires__ = {"jackknife_stats": ["scipy"]} def jackknife_resampling(data): """Performs jackknife resampling on numpy arrays. Jackknife resampling is a technique to generate 'n' deterministic samples of size 'n-1' from a measured sample of size 'n'. Basically, the i-th sample, (1<=i<=n), is generated by means of removing the i-th measurement of the original sample. Like the bootstrap resampling, this statistical technique finds applications in estimating variance, bias, and confidence intervals. Parameters ---------- data : ndarray Original sample (1-D array) from which the jackknife resamples will be generated. Returns ------- resamples : ndarray The i-th row is the i-th jackknife sample, i.e., the original sample with the i-th measurement deleted. References ---------- .. [1] McIntosh, Avery. "The Jackknife Estimation Method". <https://arxiv.org/abs/1606.00497> .. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other Resampling Plans". Technical Report No. 63, Division of Biostatistics, Stanford University, December, 1980. .. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling> """ n = data.shape[0] if n <= 0: raise ValueError("data must contain at least one measurement.") resamples = np.empty([n, n - 1]) for i in range(n): resamples[i] = np.delete(data, i) return resamples def jackknife_stats(data, statistic, confidence_level=0.95): """Performs jackknife estimation on the basis of jackknife resamples. This function requires `SciPy <https://www.scipy.org/>`_ to be installed. Parameters ---------- data : ndarray Original sample (1-D array). statistic : function Any function (or vector of functions) on the basis of the measured data, e.g, sample mean, sample variance, etc. The jackknife estimate of this statistic will be returned. confidence_level : float, optional Confidence level for the confidence interval of the Jackknife estimate. Must be a real-valued number in (0,1). Default value is 0.95. Returns ------- estimate : float or `~numpy.ndarray` The i-th element is the bias-corrected "jackknifed" estimate. bias : float or `~numpy.ndarray` The i-th element is the jackknife bias. std_err : float or `~numpy.ndarray` The i-th element is the jackknife standard error. conf_interval : ndarray If ``statistic`` is single-valued, the first and second elements are the lower and upper bounds, respectively. If ``statistic`` is vector-valued, each column corresponds to the confidence interval for each component of ``statistic``. The first and second rows contain the lower and upper bounds, respectively. Examples -------- 1. Obtain Jackknife resamples: >>> import numpy as np >>> from astropy.stats import jackknife_resampling >>> from astropy.stats import jackknife_stats >>> data = np.array([1,2,3,4,5,6,7,8,9,0]) >>> resamples = jackknife_resampling(data) >>> resamples array([[2., 3., 4., 5., 6., 7., 8., 9., 0.], [1., 3., 4., 5., 6., 7., 8., 9., 0.], [1., 2., 4., 5., 6., 7., 8., 9., 0.], [1., 2., 3., 5., 6., 7., 8., 9., 0.], [1., 2., 3., 4., 6., 7., 8., 9., 0.], [1., 2., 3., 4., 5., 7., 8., 9., 0.], [1., 2., 3., 4., 5., 6., 8., 9., 0.], [1., 2., 3., 4., 5., 6., 7., 9., 0.], [1., 2., 3., 4., 5., 6., 7., 8., 0.], [1., 2., 3., 4., 5., 6., 7., 8., 9.]]) >>> resamples.shape (10, 9) 2. 
Obtain Jackknife estimate for the mean, its bias, its standard error, and its 95% confidence interval: >>> test_statistic = np.mean >>> estimate, bias, stderr, conf_interval = jackknife_stats( ... data, test_statistic, 0.95) >>> estimate 4.5 >>> bias 0.0 >>> stderr # doctest: +FLOAT_CMP 0.95742710775633832 >>> conf_interval array([2.62347735, 6.37652265]) 3. Example for two estimates >>> test_statistic = lambda x: (np.mean(x), np.var(x)) >>> estimate, bias, stderr, conf_interval = jackknife_stats( ... data, test_statistic, 0.95) >>> estimate array([4.5 , 9.16666667]) >>> bias array([ 0. , -0.91666667]) >>> stderr array([0.95742711, 2.69124476]) >>> conf_interval array([[ 2.62347735, 3.89192387], [ 6.37652265, 14.44140947]]) IMPORTANT: Note that confidence intervals are given as columns """ # jackknife confidence interval if not (0 < confidence_level < 1): raise ValueError("confidence level must be in (0, 1).") # make sure original data is proper n = data.shape[0] if n <= 0: raise ValueError("data must contain at least one measurement.") # Only import scipy if inputs are valid from scipy.special import erfinv resamples = jackknife_resampling(data) stat_data = statistic(data) jack_stat = np.apply_along_axis(statistic, 1, resamples) mean_jack_stat = np.mean(jack_stat, axis=0) # jackknife bias bias = (n - 1) * (mean_jack_stat - stat_data) # jackknife standard error std_err = np.sqrt( (n - 1) * np.mean((jack_stat - mean_jack_stat) * (jack_stat - mean_jack_stat), axis=0) ) # bias-corrected "jackknifed estimate" estimate = stat_data - bias z_score = np.sqrt(2.0) * erfinv(confidence_level) conf_interval = estimate + z_score * np.array((-std_err, std_err)) return estimate, bias, std_err, conf_interval
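# --- Illustrative usage (not part of the astropy source above) ---
# A minimal sketch applying the jackknife helpers above to a statistic that is
# not covered by the docstring examples (the sample standard deviation).
# Requires scipy; the data values are arbitrary.
import numpy as np

from astropy.stats import jackknife_stats

data = np.array([2.1, 2.4, 1.9, 2.8, 2.2, 2.6, 2.0, 2.5])
estimate, bias, std_err, conf_interval = jackknife_stats(data, np.std, confidence_level=0.9)
# 'estimate' is the bias-corrected jackknife value of np.std(data); the last
# element holds the lower/upper bounds of the 90% confidence interval.
print(estimate, bias, std_err, conf_interval)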
af73d6cdef0f57c1c25f01f541b30be1c341014e78c0323bd5fafb94b5a73413
""" Table property for providing information about table. """ import os # Licensed under a 3-clause BSD style license - see LICENSE.rst import sys from contextlib import contextmanager from inspect import isclass import numpy as np from astropy.utils.data_info import DataInfo __all__ = ["table_info", "TableInfo", "serialize_method_as"] def table_info(tbl, option="attributes", out=""): """ Write summary information about column to the ``out`` filehandle. By default this prints to standard output via sys.stdout. The ``option`` argument specifies what type of information to include. This can be a string, a function, or a list of strings or functions. Built-in options are: - ``attributes``: basic column meta data like ``dtype`` or ``format`` - ``stats``: basic statistics: minimum, mean, and maximum If a function is specified then that function will be called with the column as its single argument. The function must return an OrderedDict containing the information attributes. If a list is provided then the information attributes will be appended for each of the options, in order. Examples -------- >>> from astropy.table.table_helpers import simple_table >>> t = simple_table(size=2, kinds='if') >>> t['a'].unit = 'm' >>> t.info() <Table length=2> name dtype unit ---- ------- ---- a int64 m b float64 >>> t.info('stats') <Table length=2> name mean std min max ---- ---- --- --- --- a 1.5 0.5 1 2 b 1.5 0.5 1 2 Parameters ---------- option : str, callable, list of (str or callable) Info option, defaults to 'attributes'. out : file-like, None Output destination, default is sys.stdout. If None then a Table with information attributes is returned Returns ------- info : `~astropy.table.Table` if out==None else None """ from .table import Table if out == "": out = sys.stdout descr_vals = [tbl.__class__.__name__] if tbl.masked: descr_vals.append("masked=True") descr_vals.append(f"length={len(tbl)}") outlines = ["<" + " ".join(descr_vals) + ">"] cols = list(tbl.columns.values()) if tbl.colnames: infos = [] for col in cols: infos.append(col.info(option, out=None)) info = Table(infos, names=list(infos[0])) else: info = Table() if out is None: return info # Since info is going to a filehandle for viewing then remove uninteresting # columns. if "class" in info.colnames: # Remove 'class' info column if all table columns are the same class # and they are the default column class for that table. uniq_types = {type(col) for col in cols} if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass): del info["class"] if "n_bad" in info.colnames and np.all(info["n_bad"] == 0): del info["n_bad"] # Standard attributes has 'length' but this is typically redundant if "length" in info.colnames and np.all(info["length"] == len(tbl)): del info["length"] for name in info.colnames: if info[name].dtype.kind in "SU" and np.all(info[name] == ""): del info[name] if tbl.colnames: outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False)) else: outlines.append("<No columns>") out.writelines(outline + os.linesep for outline in outlines) class TableInfo(DataInfo): def __call__(self, option="attributes", out=""): return table_info(self._parent, option, out) __call__.__doc__ = table_info.__doc__ @contextmanager def serialize_method_as(tbl, serialize_method): """Context manager to temporarily override individual column info.serialize_method dict values. The serialize_method attribute is an optional dict which might look like ``{'fits': 'jd1_jd2', 'ecsv': 'formatted_value', ..}``. ``serialize_method`` is a str or dict. 
If str then it is the ``serialize_method`` that will be used for all formats. If dict then the key values can be either: - Column name. This has higher precedence than the second option of matching class. - Class (matches any column which is an instance of the class) This context manager is expected to be used only within ``Table.write``. It could have been a private method on Table, but we prefer not to add clutter to that class. Parameters ---------- tbl : Table object Input table serialize_method : dict, str Dict with keys that are column names or column classes, or a str Returns ------- None (context manager) """ def get_override_sm(col): """ Determine if the ``serialize_method`` str or dict specifies an override of column presets for ``col``. Returns the matching serialize_method value or ``None``. """ # If a string then all columns match if isinstance(serialize_method, str): return serialize_method # If column name then return that serialize_method if col.info.name in serialize_method: return serialize_method[col.info.name] # Otherwise look for subclass matches for key in serialize_method: if isclass(key) and isinstance(col, key): return serialize_method[key] return None # Setup for the context block. Set individual column.info.serialize_method # values as appropriate and keep a backup copy. If ``serialize_method`` # is None or empty then don't do anything. # Original serialize_method dict, keyed by column name. This only # gets used and set if there is an override. original_sms = {} if serialize_method: # Go through every column and if it has a serialize_method info # attribute then potentially update it for the duration of the write. for col in tbl.itercols(): if hasattr(col.info, "serialize_method"): override_sm = get_override_sm(col) if override_sm: # Make a reference copy of the column serialize_method # dict which maps format (e.g. 'fits') to the # appropriate method (e.g. 'data_mask'). original_sms[col.info.name] = col.info.serialize_method # Set serialize method for *every* available format. This is # brute force, but at this point the format ('fits', 'ecsv', etc) # is not actually known (this gets determined by the write function # in registry.py). Note this creates a new temporary dict object # so that the restored version is the same original object. col.info.serialize_method = { fmt: override_sm for fmt in col.info.serialize_method } # Finally yield for the context block try: yield finally: # Teardown (restore) for the context block. Be sure to do this even # if an exception occurred. if serialize_method: for name, original_sm in original_sms.items(): tbl[name].info.serialize_method = original_sm
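# --- Illustrative usage (not part of the astropy source above) ---
# A minimal sketch of the info machinery above: asking a small table for both
# the attribute and statistics summaries, and capturing the summary as a Table
# (out=None) instead of printing it. The column values are arbitrary.
from astropy.table import Table

t = Table({"a": [1, 2, 3], "b": [1.0, 2.5, 4.0]})
t.info(["attributes", "stats"])   # prints a combined summary to stdout
info_tbl = t.info(out=None)       # returns the summary as a Table instead
print(info_tbl.colnames)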
a652155f7a377f14d3fea56486b3ff682f5432ddc090b50be624f9a7e9b6d720
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The Index class can use several implementations as its engine. Any implementation should implement the following: __init__(data, row_index) : initialize index based on key/row list pairs add(key, row) -> None : add (key, row) to existing data remove(key, data=None) -> boolean : remove data from self[key], or all of self[key] if data is None shift_left(row) -> None : decrement row numbers after row shift_right(row) -> None : increase row numbers >= row find(key) -> list : list of rows corresponding to key range(lower, upper, bounds) -> list : rows in self[k] where k is between lower and upper (<= or < based on bounds) sort() -> None : make row order align with key order sorted_data() -> list of rows in sorted order (by key) replace_rows(row_map) -> None : replace row numbers based on slice items() -> list of tuples of the form (key, data) Notes ----- When a Table is initialized from another Table, indices are (deep) copied and their columns are set to the columns of the new Table. Column creation: Column(c) -> deep copy of indices c[[1, 2]] -> deep copy and reordering of indices c[1:2] -> reference array.view(Column) -> no indices """ from copy import deepcopy import numpy as np from .bst import MaxValue, MinValue from .sorted_array import SortedArray class QueryError(ValueError): """ Indicates that a given index cannot handle the supplied query. """ pass class Index: """ The Index class makes it possible to maintain indices on columns of a Table, so that column values can be queried quickly and efficiently. Column values are stored in lexicographic sorted order, which allows for binary searching in O(log n). Parameters ---------- columns : list or None List of columns on which to create an index. If None, create an empty index for purposes of deep copying. engine : type, instance, or None Indexing engine class to use (from among SortedArray, BST, and SCEngine) or actual engine instance. If the supplied argument is None (by default), use SortedArray. unique : bool (defaults to False) Whether the values of the index must be unique """ def __init__(self, columns, engine=None, unique=False): # Local imports to avoid import problems. 
from astropy.time import Time from .table import Column, Table if columns is not None: columns = list(columns) if engine is not None and not isinstance(engine, type): # create from data self.engine = engine.__class__ self.data = engine self.columns = columns return # by default, use SortedArray self.engine = engine or SortedArray if columns is None: # this creates a special exception for deep copying columns = [] data = [] row_index = [] elif len(columns) == 0: raise ValueError("Cannot create index without at least one column") elif len(columns) == 1: col = columns[0] row_index = Column(col.argsort()) data = Table([col[row_index]]) else: num_rows = len(columns[0]) # replace Time columns with approximate form and remainder new_columns = [] for col in columns: if isinstance(col, Time): new_columns.append(col.jd) remainder = col - col.__class__( col.jd, format="jd", scale=col.scale ) new_columns.append(remainder.jd) else: new_columns.append(col) # sort the table lexicographically and keep row numbers table = Table(columns + [np.arange(num_rows)], copy_indices=False) sort_columns = new_columns[::-1] try: lines = table[np.lexsort(sort_columns)] except TypeError: # arbitrary mixins might not work with lexsort lines = table[table.argsort()] data = lines[lines.colnames[:-1]] row_index = lines[lines.colnames[-1]] self.data = self.engine(data, row_index, unique=unique) self.columns = columns def __len__(self): """ Number of rows in index. """ return len(self.columns[0]) def replace_col(self, prev_col, new_col): """ Replace an indexed column with an updated reference. Parameters ---------- prev_col : Column Column reference to replace new_col : Column New column reference """ self.columns[self.col_position(prev_col.info.name)] = new_col def reload(self): """ Recreate the index based on data in self.columns. """ self.__init__(self.columns, engine=self.engine) def col_position(self, col_name): """ Return the position of col_name in self.columns. Parameters ---------- col_name : str Name of column to look up """ for i, c in enumerate(self.columns): if c.info.name == col_name: return i raise ValueError(f"Column does not belong to index: {col_name}") def insert_row(self, pos, vals, columns): """ Insert a new row from the given values. Parameters ---------- pos : int Position at which to insert row vals : list or tuple List of values to insert into a new row columns : list Table column references """ key = [None] * len(self.columns) for i, col in enumerate(columns): try: key[self.col_position(col.info.name)] = vals[i] except ValueError: # not a member of index continue num_rows = len(self.columns[0]) if pos < num_rows: # shift all rows >= pos to the right self.data.shift_right(pos) self.data.add(tuple(key), pos) def get_row_specifier(self, row_specifier): """ Return an iterable corresponding to the input row specifier. Parameters ---------- row_specifier : int, list, ndarray, or slice """ if isinstance(row_specifier, (int, np.integer)): # single row return (row_specifier,) elif isinstance(row_specifier, (list, np.ndarray)): return row_specifier elif isinstance(row_specifier, slice): col_len = len(self.columns[0]) return range(*row_specifier.indices(col_len)) raise ValueError( "Expected int, array of ints, or slice but got {} in remove_rows".format( row_specifier ) ) def remove_rows(self, row_specifier): """ Remove the given rows from the index. 
Parameters ---------- row_specifier : int, list, ndarray, or slice Indicates which row(s) to remove """ rows = [] # To maintain the correct row order, we loop twice, # deleting rows first and then reordering the remaining rows for row in self.get_row_specifier(row_specifier): self.remove_row(row, reorder=False) rows.append(row) # second pass - row order is reversed to maintain # correct row numbers for row in reversed(sorted(rows)): self.data.shift_left(row) def remove_row(self, row, reorder=True): """ Remove the given row from the index. Parameters ---------- row : int Position of row to remove reorder : bool Whether to reorder indices after removal """ # for removal, form a key consisting of column values in this row if not self.data.remove(tuple(col[row] for col in self.columns), row): raise ValueError(f"Could not remove row {row} from index") # decrement the row number of all later rows if reorder: self.data.shift_left(row) def find(self, key): """ Return the row values corresponding to key, in sorted order. Parameters ---------- key : tuple Values to search for in each column """ return self.data.find(key) def same_prefix(self, key): """ Return rows whose keys contain the supplied key as a prefix. Parameters ---------- key : tuple Prefix for which to search """ return self.same_prefix_range(key, key, (True, True)) def same_prefix_range(self, lower, upper, bounds=(True, True)): """ Return rows whose keys have a prefix in the given range. Parameters ---------- lower : tuple Lower prefix bound upper : tuple Upper prefix bound bounds : tuple (x, y) of bools Indicates whether the search should be inclusive or exclusive with respect to the endpoints. The first argument x corresponds to an inclusive lower bound, and the second argument y to an inclusive upper bound. """ n = len(lower) ncols = len(self.columns) a = MinValue() if bounds[0] else MaxValue() b = MaxValue() if bounds[1] else MinValue() # [x, y] search corresponds to [(x, min), (y, max)] # (x, y) search corresponds to ((x, max), (x, min)) lower = lower + tuple((ncols - n) * [a]) upper = upper + tuple((ncols - n) * [b]) return self.data.range(lower, upper, bounds) def range(self, lower, upper, bounds=(True, True)): """ Return rows within the given range. Parameters ---------- lower : tuple Lower prefix bound upper : tuple Upper prefix bound bounds : tuple (x, y) of bools Indicates whether the search should be inclusive or exclusive with respect to the endpoints. The first argument x corresponds to an inclusive lower bound, and the second argument y to an inclusive upper bound. """ return self.data.range(lower, upper, bounds) def replace(self, row, col_name, val): """ Replace the value of a column at a given position. Parameters ---------- row : int Row number to modify col_name : str Name of the Column to modify val : col.info.dtype Value to insert at specified row of col """ self.remove_row(row, reorder=False) key = [c[row] for c in self.columns] key[self.col_position(col_name)] = val self.data.add(tuple(key), row) def replace_rows(self, col_slice): """ Modify rows in this index to agree with the specified slice. For example, given an index {'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'], an input col_slice of [2, 0] will result in the relabeling {'3': 0, '2': 1} on the sliced column ['3', '2']. Parameters ---------- col_slice : list Indices to slice """ row_map = {row: i for i, row in enumerate(col_slice)} self.data.replace_rows(row_map) def sort(self): """ Make row numbers follow the same sort order as the keys of the index. 
""" self.data.sort() def sorted_data(self): """ Returns a list of rows in sorted order based on keys; essentially acts as an argsort() on columns. """ return self.data.sorted_data() def __getitem__(self, item): """ Returns a sliced version of this index. Parameters ---------- item : slice Input slice Returns ------- SlicedIndex A sliced reference to this index. """ return SlicedIndex(self, item) def __repr__(self): col_names = tuple(col.info.name for col in self.columns) return f"<{self.__class__.__name__} columns={col_names} data={self.data}>" def __deepcopy__(self, memo): """ Return a deep copy of this index. Notes ----- The default deep copy must be overridden to perform a shallow copy of the index columns, avoiding infinite recursion. Parameters ---------- memo : dict """ # Bypass Index.__new__ to create an actual Index, not a SlicedIndex. index = super().__new__(self.__class__) index.__init__(None, engine=self.engine) index.data = deepcopy(self.data, memo) index.columns = self.columns[:] # new list, same columns memo[id(self)] = index return index class SlicedIndex: """ This class provides a wrapper around an actual Index object to make index slicing function correctly. Since numpy expects array slices to provide an actual data view, a SlicedIndex should retrieve data directly from the original index and then adapt it to the sliced coordinate system as appropriate. Parameters ---------- index : Index The original Index reference index_slice : tuple, slice The slice to which this SlicedIndex corresponds original : bool Whether this SlicedIndex represents the original index itself. For the most part this is similar to index[:] but certain copying operations are avoided, and the slice retains the length of the actual index despite modification. """ def __init__(self, index, index_slice, original=False): self.index = index self.original = original self._frozen = False if isinstance(index_slice, tuple): self.start, self._stop, self.step = index_slice elif isinstance(index_slice, slice): # index_slice is an actual slice num_rows = len(index.columns[0]) self.start, self._stop, self.step = index_slice.indices(num_rows) else: raise TypeError("index_slice must be tuple or slice") @property def length(self): return 1 + (self.stop - self.start - 1) // self.step @property def stop(self): """ The stopping position of the slice, or the end of the index if this is an original slice. """ return len(self.index) if self.original else self._stop def __getitem__(self, item): """ Returns another slice of this Index slice. Parameters ---------- item : slice Index slice """ if self.length <= 0: # empty slice return SlicedIndex(self.index, slice(1, 0)) start, stop, step = item.indices(self.length) new_start = self.orig_coords(start) new_stop = self.orig_coords(stop) new_step = self.step * step return SlicedIndex(self.index, (new_start, new_stop, new_step)) def sliced_coords(self, rows): """ Convert the input rows to the sliced coordinate system. 
Parameters ---------- rows : list Rows in the original coordinate system Returns ------- sliced_rows : list Rows in the sliced coordinate system """ if self.original: return rows else: rows = np.array(rows) row0 = rows - self.start if self.step != 1: correct_mod = np.mod(row0, self.step) == 0 row0 = row0[correct_mod] if self.step > 0: ok = (row0 >= 0) & (row0 < self.stop - self.start) else: ok = (row0 <= 0) & (row0 > self.stop - self.start) return row0[ok] // self.step def orig_coords(self, row): """ Convert the input row from sliced coordinates back to original coordinates. Parameters ---------- row : int Row in the sliced coordinate system Returns ------- orig_row : int Row in the original coordinate system """ return row if self.original else self.start + row * self.step def find(self, key): return self.sliced_coords(self.index.find(key)) def where(self, col_map): return self.sliced_coords(self.index.where(col_map)) def range(self, lower, upper): return self.sliced_coords(self.index.range(lower, upper)) def same_prefix(self, key): return self.sliced_coords(self.index.same_prefix(key)) def sorted_data(self): return self.sliced_coords(self.index.sorted_data()) def replace(self, row, col, val): if not self._frozen: self.index.replace(self.orig_coords(row), col, val) def get_index_or_copy(self): if not self.original: # replace self.index with a new object reference self.index = deepcopy(self.index) return self.index def insert_row(self, pos, vals, columns): if not self._frozen: self.get_index_or_copy().insert_row(self.orig_coords(pos), vals, columns) def get_row_specifier(self, row_specifier): return [ self.orig_coords(x) for x in self.index.get_row_specifier(row_specifier) ] def remove_rows(self, row_specifier): if not self._frozen: self.get_index_or_copy().remove_rows(row_specifier) def replace_rows(self, col_slice): if not self._frozen: self.index.replace_rows([self.orig_coords(x) for x in col_slice]) def sort(self): if not self._frozen: self.get_index_or_copy().sort() def __repr__(self): slice_str = ( "" if self.original else f" slice={self.start}:{self.stop}:{self.step}" ) return ( f"<{self.__class__.__name__} original={self.original}{slice_str}" f" index={self.index}>" ) def replace_col(self, prev_col, new_col): self.index.replace_col(prev_col, new_col) def reload(self): self.index.reload() def col_position(self, col_name): return self.index.col_position(col_name) def get_slice(self, col_slice, item): """ Return a newly created index from the given slice. Parameters ---------- col_slice : Column object Already existing slice of a single column item : list or ndarray Slice for retrieval """ from .table import Table if len(self.columns) == 1: index = Index([col_slice], engine=self.data.__class__) return self.__class__(index, slice(0, 0, None), original=True) t = Table(self.columns, copy_indices=False) with t.index_mode("discard_on_copy"): new_cols = t[item].columns.values() index = Index(new_cols, engine=self.data.__class__) return self.__class__(index, slice(0, 0, None), original=True) @property def columns(self): return self.index.columns @property def data(self): return self.index.data def get_index(table, table_copy=None, names=None): """ Inputs a table and some subset of its columns as table_copy. List or tuple containing names of columns as names,and returns an index corresponding to this subset or list or None if no such index exists. 
Parameters ---------- table : `Table` Input table table_copy : `Table`, optional Subset of the columns in the ``table`` argument names : list, tuple, optional Subset of column names in the ``table`` argument Returns ------- Index of columns or None """ if names is not None and table_copy is not None: raise ValueError( 'one and only one argument from "table_copy" or "names" is required' ) if names is None and table_copy is None: raise ValueError( 'one and only one argument from "table_copy" or "names" is required' ) if names is not None: names = set(names) else: names = set(table_copy.colnames) if not names <= set(table.colnames): raise ValueError(f"{names} is not a subset of table columns") for name in names: for index in table[name].info.indices: if {col.info.name for col in index.columns} == names: return index return None def get_index_by_names(table, names): """ Returns an index in ``table`` corresponding to the ``names`` columns or None if no such index exists. Parameters ---------- table : `Table` Input table nmaes : tuple, list Column names """ names = list(names) for index in table.indices: index_names = [col.info.name for col in index.columns] if index_names == names: return index else: return None class _IndexModeContext: """ A context manager that allows for special indexing modes, which are intended to improve performance. Currently the allowed modes are "freeze", in which indices are not modified upon column modification, "copy_on_getitem", in which indices are copied upon column slicing, and "discard_on_copy", in which indices are discarded upon table copying/slicing. """ _col_subclasses = {} def __init__(self, table, mode): """ Parameters ---------- table : Table The table to which the mode should be applied mode : str Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. In 'discard_on_copy' mode, indices are not copied whenever columns or tables are copied. In 'freeze' mode, indices are not modified whenever columns are modified; at the exit of the context, indices refresh themselves based on column values. This mode is intended for scenarios in which one intends to make many additions or modifications on an indexed column. In 'copy_on_getitem' mode, indices are copied when taking column slices as well as table slices, so col[i0:i1] will preserve indices. """ self.table = table self.mode = mode # Used by copy_on_getitem self._orig_classes = [] if mode not in ("freeze", "discard_on_copy", "copy_on_getitem"): raise ValueError( "Expected a mode of either 'freeze', " "'discard_on_copy', or 'copy_on_getitem', got " "'{}'".format(mode) ) def __enter__(self): if self.mode == "discard_on_copy": self.table._copy_indices = False elif self.mode == "copy_on_getitem": for col in self.table.columns.values(): self._orig_classes.append(col.__class__) col.__class__ = self._get_copy_on_getitem_shim(col.__class__) else: for index in self.table.indices: index._frozen = True def __exit__(self, exc_type, exc_value, traceback): if self.mode == "discard_on_copy": self.table._copy_indices = True elif self.mode == "copy_on_getitem": for col in reversed(self.table.columns.values()): col.__class__ = self._orig_classes.pop() else: for index in self.table.indices: index._frozen = False index.reload() def _get_copy_on_getitem_shim(self, cls): """ This creates a subclass of the column's class which overrides that class's ``__getitem__``, such that when returning a slice of the column, the relevant indices are also copied over to the slice. 
Ideally, rather than shimming in a new ``__class__`` we would be able to just flip a flag that is checked by the base class's ``__getitem__``. Unfortunately, since the flag needs to be a Python variable, this slows down ``__getitem__`` too much in the more common case where a copy of the indices is not needed. See the docstring for ``astropy.table._column_mixins`` for more information on that. """ if cls in self._col_subclasses: return self._col_subclasses[cls] def __getitem__(self, item): value = cls.__getitem__(self, item) if type(value) is type(self): value = self.info.slice_indices(value, item, len(self)) return value clsname = f"_{cls.__name__}WithIndexCopy" new_cls = type(str(clsname), (cls,), {"__getitem__": __getitem__}) self._col_subclasses[cls] = new_cls return new_cls class TableIndices(list): """ A special list of table indices allowing for retrieval by column name(s). Parameters ---------- lst : list List of indices """ def __init__(self, lst): super().__init__(lst) def __getitem__(self, item): """ Retrieve an item from the list of indices. Parameters ---------- item : int, str, tuple, or list Position in list or name(s) of indexed column(s) """ if isinstance(item, str): item = [item] if isinstance(item, (list, tuple)): item = list(item) for index in self: try: for name in item: index.col_position(name) if len(index.columns) == len(item): return index except ValueError: pass # index search failed raise IndexError(f"No index found for {item}") return super().__getitem__(item) class TableLoc: """ A pseudo-list of Table rows allowing for retrieval of rows by indexed column values. Parameters ---------- table : Table Indexed table to use """ def __init__(self, table): self.table = table self.indices = table.indices if len(self.indices) == 0: raise ValueError("Cannot create TableLoc object with no indices") def _get_rows(self, item): """ Retrieve Table rows indexes by value slice. """ if isinstance(item, tuple): key, item = item else: key = self.table.primary_key index = self.indices[key] if len(index.columns) > 1: raise ValueError("Cannot use .loc on multi-column indices") if isinstance(item, slice): # None signifies no upper/lower bound start = MinValue() if item.start is None else item.start stop = MaxValue() if item.stop is None else item.stop rows = index.range((start,), (stop,)) else: if not isinstance(item, (list, np.ndarray)): # single element item = [item] # item should be a list or ndarray of values rows = [] for key in item: p = index.find((key,)) if len(p) == 0: raise KeyError(f"No matches found for key {key}") else: rows.extend(p) return rows def __getitem__(self, item): """ Retrieve Table rows by value slice. Parameters ---------- item : column element, list, ndarray, slice or tuple Can be a value of the table primary index, a list/ndarray of such values, or a value slice (both endpoints are included). If a tuple is provided, the first element must be an index to use instead of the primary key, and the second element must be as above. """ rows = self._get_rows(item) if len(rows) == 0: # no matches found raise KeyError(f"No matches found for key {item}") elif len(rows) == 1: # single row return self.table[rows[0]] return self.table[rows] def __setitem__(self, key, value): """ Assign Table row's by value slice. Parameters ---------- key : column element, list, ndarray, slice or tuple Can be a value of the table primary index, a list/ndarray of such values, or a value slice (both endpoints are included). 
If a tuple is provided, the first element must be an index to use instead of the primary key, and the second element must be as above. value : New values of the row elements. Can be a list of tuples/lists to update the row. """ rows = self._get_rows(key) if len(rows) == 0: # no matches found raise KeyError(f"No matches found for key {key}") elif len(rows) == 1: # single row self.table[rows[0]] = value else: # multiple rows if len(rows) == len(value): for row, val in zip(rows, value): self.table[row] = val else: raise ValueError(f"Right side should contain {len(rows)} values") class TableLocIndices(TableLoc): def __getitem__(self, item): """ Retrieve Table row's indices by value slice. Parameters ---------- item : column element, list, ndarray, slice or tuple Can be a value of the table primary index, a list/ndarray of such values, or a value slice (both endpoints are included). If a tuple is provided, the first element must be an index to use instead of the primary key, and the second element must be as above. """ rows = self._get_rows(item) if len(rows) == 0: # no matches found raise KeyError(f"No matches found for key {item}") elif len(rows) == 1: # single row return rows[0] return rows class TableILoc(TableLoc): """ A variant of TableLoc allowing for row retrieval by indexed order rather than data values. Parameters ---------- table : Table Indexed table to use """ def __init__(self, table): super().__init__(table) def __getitem__(self, item): if isinstance(item, tuple): key, item = item else: key = self.table.primary_key index = self.indices[key] rows = index.sorted_data()[item] table_slice = self.table[rows] if len(table_slice) == 0: # no matches found raise IndexError(f"Invalid index for iloc: {item}") return table_slice
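
# Illustrative usage sketch (an addition, not part of the original module):
# a minimal demonstration of the index machinery above through the public
# Table.add_index / Table.loc / Table.iloc interfaces. The column names and
# values are hypothetical; guarded so nothing runs on import.
if __name__ == "__main__":
    from astropy.table import Table

    t = Table({'id': [3, 1, 2], 'flux': [10.0, 20.0, 30.0]})
    t.add_index('id')        # builds an Index on the 'id' column (primary key)
    row = t.loc[2]           # value-based row lookup (TableLoc)
    subset = t.loc[1:2]      # value slice; both endpoints are included
    first = t.iloc[0]        # row at position 0 in index-sorted order (TableILoc)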
ba84d7311082045bb5e21709525921392c447fc3bf8b520018f5391b6d851696
# Licensed under a 3-clause BSD style license - see LICENSE.rst from os.path import abspath, dirname, join import astropy.config as _config import astropy.io.registry as io_registry from astropy import extern from .table import Table class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.table.jsviewer`. """ jquery_url = _config.ConfigItem( "https://code.jquery.com/jquery-3.6.0.min.js", "The URL to the jquery library." ) datatables_url = _config.ConfigItem( "https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js", "The URL to the jquery datatables library.", ) css_urls = _config.ConfigItem( ["https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css"], "The URLs to the css file(s) to include.", cfgtype="string_list", ) conf = Conf() EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), "jquery", "data", "js")) EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), "jquery", "data", "css")) _SORTING_SCRIPT_PART_1 = """ var astropy_sort_num = function(a, b) {{ var a_num = parseFloat(a); var b_num = parseFloat(b); if (isNaN(a_num) && isNaN(b_num)) return ((a < b) ? -1 : ((a > b) ? 1 : 0)); else if (!isNaN(a_num) && !isNaN(b_num)) return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0)); else return isNaN(a_num) ? -1 : 1; }} """ _SORTING_SCRIPT_PART_2 = """ jQuery.extend( jQuery.fn.dataTableExt.oSort, {{ "optionalnum-asc": astropy_sort_num, "optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }} }}); """ IPYNB_JS_SCRIPT = """ <script> %(sorting_script1)s require.config({{paths: {{ datatables: '{datatables_url}' }}}}); require(["datatables"], function(){{ console.log("$('#{tid}').dataTable()"); %(sorting_script2)s $('#{tid}').dataTable({{ order: [], pageLength: {display_length}, lengthMenu: {display_length_menu}, pagingType: "full_numbers", columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] }}); }}); </script> """ % dict( sorting_script1=_SORTING_SCRIPT_PART_1, sorting_script2=_SORTING_SCRIPT_PART_2 ) HTML_JS_SCRIPT = ( _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """ $(document).ready(function() {{ $('#{tid}').dataTable({{ order: [], pageLength: {display_length}, lengthMenu: {display_length_menu}, pagingType: "full_numbers", columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] }}); }} ); """ ) # Default CSS for the JSViewer writer DEFAULT_CSS = """\ body {font-family: sans-serif;} table.dataTable {width: auto !important; margin: 0 !important;} .dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} """ # Default CSS used when rendering a table in the IPython notebook DEFAULT_CSS_NB = """\ table.dataTable {clear: both; width: auto !important; margin: 0 !important;} .dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{ display: inline-block; margin-right: 1em; } .paginate_button { margin-right: 5px; } """ class JSViewer: """Provides an interactive HTML export of a Table. This class provides an interface to the `DataTables <https://datatables.net/>`_ library, which allow to visualize interactively an HTML table. It is used by the `~astropy.table.Table.show_in_browser` method. Parameters ---------- use_local_files : bool, optional Use local files or a CDN for JavaScript libraries. Default False. display_length : int, optional Number or rows to show. Default to 50. 
""" def __init__(self, use_local_files=False, display_length=50): self._use_local_files = use_local_files self.display_length_menu = [ [10, 25, 50, 100, 500, 1000, -1], [10, 25, 50, 100, 500, 1000, "All"], ] self.display_length = display_length for L in self.display_length_menu: if display_length not in L: L.insert(0, display_length) @property def jquery_urls(self): if self._use_local_files: return [ "file://" + join(EXTERN_JS_DIR, "jquery-3.6.0.min.js"), "file://" + join(EXTERN_JS_DIR, "jquery.dataTables.min.js"), ] else: return [conf.jquery_url, conf.datatables_url] @property def css_urls(self): if self._use_local_files: return ["file://" + join(EXTERN_CSS_DIR, "jquery.dataTables.css")] else: return conf.css_urls def _jstable_file(self): if self._use_local_files: return "file://" + join(EXTERN_JS_DIR, "jquery.dataTables.min") else: return conf.datatables_url[:-3] def ipynb(self, table_id, css=None, sort_columns="[]"): html = f"<style>{css if css is not None else DEFAULT_CSS_NB}</style>" html += IPYNB_JS_SCRIPT.format( display_length=self.display_length, display_length_menu=self.display_length_menu, datatables_url=self._jstable_file(), tid=table_id, sort_columns=sort_columns, ) return html def html_js(self, table_id="table0", sort_columns="[]"): return HTML_JS_SCRIPT.format( display_length=self.display_length, display_length_menu=self.display_length_menu, tid=table_id, sort_columns=sort_columns, ).strip() def write_table_jsviewer( table, filename, table_id=None, max_lines=5000, table_class="display compact", jskwargs=None, css=DEFAULT_CSS, htmldict=None, overwrite=False, ): if table_id is None: table_id = f"table{id(table)}" jskwargs = jskwargs or {} jsv = JSViewer(**jskwargs) sortable_columns = [ i for i, col in enumerate(table.columns.values()) if col.info.dtype.kind in "iufc" ] html_options = { "table_id": table_id, "table_class": table_class, "css": css, "cssfiles": jsv.css_urls, "jsfiles": jsv.jquery_urls, "js": jsv.html_js(table_id=table_id, sort_columns=sortable_columns), } if htmldict: html_options.update(htmldict) if max_lines < len(table): table = table[:max_lines] table.write(filename, format="html", htmldict=html_options, overwrite=overwrite) io_registry.register_writer("jsviewer", Table, write_table_jsviewer)
25d53a8a21f85932bb6358a1cae42cfe7e242a030af9b3fa531278fa60bf0e54
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.io import registry from .info import serialize_method_as __all__ = ["TableRead", "TableWrite"] __doctest_skip__ = ["TableRead", "TableWrite"] class TableRead(registry.UnifiedReadWrite): """Read and parse a data table and return as a Table. This function provides the Table interface to the astropy unified I/O layer. This allows easily reading a file in many supported data formats using syntax such as:: >>> from astropy.table import Table >>> dat = Table.read('table.dat', format='ascii') >>> events = Table.read('events.fits', format='fits') Get help on the available readers for ``Table`` using the``help()`` method:: >>> Table.read.help() # Get help reading Table and list supported formats >>> Table.read.help('fits') # Get detailed help on Table FITS reader >>> Table.read.list_formats() # Print list of available formats See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- *args : tuple, optional Positional arguments passed through to data reader. If supplied the first argument is typically the input filename. format : str File format specifier. units : list, dict, optional List or dict of units to apply to columns descriptions : list, dict, optional List or dict of descriptions to apply to columns **kwargs : dict, optional Keyword arguments passed through to data reader. Returns ------- out : `~astropy.table.Table` Table corresponding to file contents Notes ----- """ def __init__(self, instance, cls): super().__init__(instance, cls, "read", registry=None) # uses default global registry def __call__(self, *args, **kwargs): cls = self._cls units = kwargs.pop("units", None) descriptions = kwargs.pop("descriptions", None) out = self.registry.read(cls, *args, **kwargs) # For some readers (e.g., ascii.ecsv), the returned `out` class is not # guaranteed to be the same as the desired output `cls`. If so, # try coercing to desired class without copying (io.registry.read # would normally do a copy). The normal case here is swapping # Table <=> QTable. if cls is not out.__class__: try: out = cls(out, copy=False) except Exception: raise TypeError( f"could not convert reader output to {cls.__name__} class." ) out._set_column_attribute("unit", units) out._set_column_attribute("description", descriptions) return out class TableWrite(registry.UnifiedReadWrite): """ Write this Table object out in the specified format. This function provides the Table interface to the astropy unified I/O layer. This allows easily writing a file in many supported data formats using syntax such as:: >>> from astropy.table import Table >>> dat = Table([[1, 2], [3, 4]], names=('a', 'b')) >>> dat.write('table.dat', format='ascii') Get help on the available writers for ``Table`` using the``help()`` method:: >>> Table.write.help() # Get help writing Table and list supported formats >>> Table.write.help('fits') # Get detailed help on Table FITS writer >>> Table.write.list_formats() # Print list of available formats The ``serialize_method`` argument is explained in the section on `Table serialization methods <https://docs.astropy.org/en/latest/io/unified.html#table-serialization-methods>`_. See also: https://docs.astropy.org/en/stable/io/unified.html Parameters ---------- *args : tuple, optional Positional arguments passed through to data writer. If supplied the first argument is the output filename. format : str File format specifier. serialize_method : str, dict, optional Serialization method specifier for columns. 
**kwargs : dict, optional Keyword arguments passed through to data writer. Notes ----- """ def __init__(self, instance, cls): super().__init__(instance, cls, "write", registry=None) # uses default global registry def __call__(self, *args, serialize_method=None, **kwargs): instance = self._instance with serialize_method_as(instance, serialize_method): self.registry.write(instance, *args, **kwargs)
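
# Illustrative usage sketch (an addition, not part of the original module):
# reading a file while applying 'units' and 'descriptions', as handled by
# TableRead.__call__ above. The filename and column names are hypothetical.
if __name__ == "__main__":
    import astropy.units as u
    from astropy.table import Table

    t = Table.read('catalog.ecsv', format='ascii.ecsv',
                   units={'ra': u.deg, 'dec': u.deg},
                   descriptions={'ra': 'Right ascension', 'dec': 'Declination'})
    t.write('catalog.fits', format='fits', overwrite=True)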
aa2f0c6d514473e3cd254fb02e58cbb8152136f1e455e171d71122e36a2dce0b
# Licensed under a 3-clause BSD style license - see LICENSE.rst import collections from collections import OrderedDict from operator import index as operator_index import numpy as np class Row: """A class to represent one row of a Table object. A Row object is returned when a Table object is indexed with an integer or when iterating over a table:: >>> from astropy.table import Table >>> table = Table([(1, 2), (3, 4)], names=('a', 'b'), ... dtype=('int32', 'int32')) >>> row = table[1] >>> row <Row index=1> a b int32 int32 ----- ----- 2 4 >>> row['a'] 2 >>> row[1] 4 """ def __init__(self, table, index): # Ensure that the row index is a valid index (int) index = operator_index(index) n = len(table) if index < -n or index >= n: raise IndexError( "index {} out of range for table with length {}".format( index, len(table) ) ) # Finally, ensure the index is positive [#8422] and set Row attributes self._index = index % n self._table = table def __getitem__(self, item): try: # Try the most common use case of accessing a single column in the Row. # Bypass the TableColumns __getitem__ since that does more testing # and allows a list of tuple or str, which is not the right thing here. out = OrderedDict.__getitem__(self._table.columns, item)[self._index] except (KeyError, TypeError): if self._table._is_list_or_tuple_of_str(item): cols = [self._table[name] for name in item] out = self._table.__class__(cols, copy=False)[self._index] else: # This is only to raise an exception out = self._table.columns[item][self._index] return out def __setitem__(self, item, val): if self._table._is_list_or_tuple_of_str(item): self._table._set_row(self._index, colnames=item, vals=val) else: self._table.columns[item][self._index] = val def _ipython_key_completions_(self): return self.colnames def __eq__(self, other): if self._table.masked: # Sent bug report to numpy-discussion group on 2012-Oct-21, subject: # "Comparing rows in a structured masked array raises exception" # No response, so this is still unresolved. raise ValueError( "Unable to compare rows for masked table due to numpy.ma bug" ) return self.as_void() == other def __ne__(self, other): if self._table.masked: raise ValueError( "Unable to compare rows for masked table due to numpy.ma bug" ) return self.as_void() != other def __array__(self, dtype=None): """Support converting Row to np.array via np.array(table). Coercion to a different dtype via np.array(table, dtype) is not supported and will raise a ValueError. If the parent table is masked then the mask information is dropped. """ if dtype is not None: raise ValueError("Datatype coercion is not allowed") return np.asarray(self.as_void()) def __len__(self): return len(self._table.columns) def __iter__(self): index = self._index for col in self._table.columns.values(): yield col[index] def keys(self): return self._table.columns.keys() def values(self): return self.__iter__() @property def table(self): return self._table @property def index(self): return self._index def as_void(self): """ Returns a *read-only* copy of the row values in the form of np.void or np.ma.mvoid objects. This corresponds to the object types returned for row indexing of a pure numpy structured array or masked array. This method is slow and its use is discouraged when possible. Returns ------- void_row : ``numpy.void`` or ``numpy.ma.mvoid`` Copy of row values. ``numpy.void`` if unmasked, ``numpy.ma.mvoid`` else. 
""" index = self._index cols = self._table.columns.values() vals = tuple(np.asarray(col)[index] for col in cols) if self._table.masked: mask = tuple( col.mask[index] if hasattr(col, "mask") else False for col in cols ) void_row = np.ma.array([vals], mask=[mask], dtype=self.dtype)[0] else: void_row = np.array([vals], dtype=self.dtype)[0] return void_row @property def meta(self): return self._table.meta @property def columns(self): return self._table.columns @property def colnames(self): return self._table.colnames @property def dtype(self): return self._table.dtype def _base_repr_(self, html=False): """ Display row as a single-line table but with appropriate header line. """ index = self.index if (self.index >= 0) else self.index + len(self._table) table = self._table[index : index + 1] descr_vals = [self.__class__.__name__, f"index={self.index}"] if table.masked: descr_vals.append("masked=True") return table._base_repr_( html, descr_vals, max_width=-1, tableid=f"table{id(self._table)}" ) def _repr_html_(self): return self._base_repr_(html=True) def __repr__(self): return self._base_repr_(html=False) def __str__(self): index = self.index if (self.index >= 0) else self.index + len(self._table) return "\n".join(self.table[index : index + 1].pformat(max_width=-1)) def __bytes__(self): return str(self).encode("utf-8") collections.abc.Sequence.register(Row)
6d92ac37413ec8766df47faa1ab565de2059a0e14ad9867f0ae3e017ae210b88
# Licensed under a 3-clause BSD style license - see LICENSE.rst import platform import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning from .index import get_index_by_names __all__ = ["TableGroups", "ColumnGroups"] def table_group_by(table, keys): # index copies are unnecessary and slow down _table_group_by with table.index_mode("discard_on_copy"): return _table_group_by(table, keys) def _table_group_by(table, keys): """ Get groups for ``table`` on specified ``keys``. Parameters ---------- table : `Table` Table to group keys : str, list of str, `Table`, or Numpy array Grouping key specifier Returns ------- grouped_table : Table object with groups attr set accordingly """ from .serialize import represent_mixins_as_columns from .table import Table # Pre-convert string to tuple of strings, or Table to the underlying structured array if isinstance(keys, str): keys = (keys,) if isinstance(keys, (list, tuple)): for name in keys: if name not in table.colnames: raise ValueError(f"Table does not have key column {name!r}") if table.masked and np.any(table[name].mask): raise ValueError( f"Missing values in key column {name!r} are not allowed" ) # Make a column slice of the table without copying table_keys = table.__class__([table[key] for key in keys], copy=False) # If available get a pre-existing index for these columns table_index = get_index_by_names(table, keys) grouped_by_table_cols = True elif isinstance(keys, (np.ndarray, Table)): table_keys = keys if len(table_keys) != len(table): raise ValueError( "Input keys array length {} does not match table length {}".format( len(table_keys), len(table) ) ) table_index = None grouped_by_table_cols = False else: raise TypeError( "Keys input must be string, list, tuple, Table or numpy array, but got {}".format( type(keys) ) ) # If there is not already an available index and table_keys is a Table then ensure # that all cols (including mixins) are in a form that can sorted with the code below. if not table_index and isinstance(table_keys, Table): table_keys = represent_mixins_as_columns(table_keys) # Get the argsort index `idx_sort`, accounting for particulars try: # take advantage of index internal sort if possible if table_index is not None: idx_sort = table_index.sorted_data() else: idx_sort = table_keys.argsort(kind="mergesort") stable_sort = True except TypeError: # Some versions (likely 1.6 and earlier) of numpy don't support # 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable # sort by default, nor does Windows, while Linux does (or appears to). idx_sort = table_keys.argsort() stable_sort = platform.system() not in ("Darwin", "Windows") # Finally do the actual sort of table_keys values table_keys = table_keys[idx_sort] # Get all keys diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True])) indices = np.flatnonzero(diffs) # If the sort is not stable (preserves original table order) then sort idx_sort in # place within each group. if not stable_sort: for i0, i1 in zip(indices[:-1], indices[1:]): idx_sort[i0:i1].sort() # Make a new table and set the _groups to the appropriate TableGroups object. # Take the subset of the original keys at the indices values (group boundaries). 
out = table.__class__(table[idx_sort]) out_keys = table_keys[indices[:-1]] if isinstance(out_keys, Table): out_keys.meta["grouped_by_table_cols"] = grouped_by_table_cols out._groups = TableGroups(out, indices=indices, keys=out_keys) return out def column_group_by(column, keys): """ Get groups for ``column`` on specified ``keys`` Parameters ---------- column : Column object Column to group keys : Table or Numpy array of same length as col Grouping key specifier Returns ------- grouped_column : Column object with groups attr set accordingly """ from .serialize import represent_mixins_as_columns from .table import Table if isinstance(keys, Table): keys = represent_mixins_as_columns(keys) keys = keys.as_array() if not isinstance(keys, np.ndarray): raise TypeError(f"Keys input must be numpy array, but got {type(keys)}") if len(keys) != len(column): raise ValueError( "Input keys array length {} does not match column length {}".format( len(keys), len(column) ) ) idx_sort = keys.argsort() keys = keys[idx_sort] # Get all keys diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True])) indices = np.flatnonzero(diffs) # Make a new column and set the _groups to the appropriate ColumnGroups object. # Take the subset of the original keys at the indices values (group boundaries). out = column.__class__(column[idx_sort]) out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]]) return out class BaseGroups: """ A class to represent groups within a table of heterogeneous data. - ``keys``: key values corresponding to each group - ``indices``: index values in parent table or column corresponding to group boundaries - ``aggregate()``: method to create new table by aggregating within groups """ @property def parent(self): return ( self.parent_column if isinstance(self, ColumnGroups) else self.parent_table ) def __iter__(self): self._iter_index = 0 return self def next(self): ii = self._iter_index if ii < len(self.indices) - 1: i0, i1 = self.indices[ii], self.indices[ii + 1] self._iter_index += 1 return self.parent[i0:i1] else: raise StopIteration __next__ = next def __getitem__(self, item): parent = self.parent if isinstance(item, (int, np.integer)): i0, i1 = self.indices[item], self.indices[item + 1] out = parent[i0:i1] out.groups._keys = parent.groups.keys[item] else: indices0, indices1 = self.indices[:-1], self.indices[1:] try: i0s, i1s = indices0[item], indices1[item] except Exception as err: raise TypeError( "Index item for groups attribute must be a slice, " "numpy mask or int array" ) from err mask = np.zeros(len(parent), dtype=bool) # Is there a way to vectorize this in numpy? 
for i0, i1 in zip(i0s, i1s): mask[i0:i1] = True out = parent[mask] out.groups._keys = parent.groups.keys[item] out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)]) return out def __repr__(self): return f"<{self.__class__.__name__} indices={self.indices}>" def __len__(self): return len(self.indices) - 1 class ColumnGroups(BaseGroups): def __init__(self, parent_column, indices=None, keys=None): self.parent_column = parent_column # parent Column self.parent_table = parent_column.info.parent_table self._indices = indices self._keys = keys @property def indices(self): # If the parent column is in a table then use group indices from table if self.parent_table: return self.parent_table.groups.indices else: if self._indices is None: return np.array([0, len(self.parent_column)]) else: return self._indices @property def keys(self): # If the parent column is in a table then use group indices from table if self.parent_table: return self.parent_table.groups.keys else: return self._keys def aggregate(self, func): from .column import MaskedColumn i0s, i1s = self.indices[:-1], self.indices[1:] par_col = self.parent_column masked = isinstance(par_col, MaskedColumn) reduceat = hasattr(func, "reduceat") sum_case = func is np.sum mean_case = func is np.mean try: if not masked and (reduceat or sum_case or mean_case): if mean_case: vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices) else: if sum_case: func = np.add vals = func.reduceat(par_col, i0s) else: vals = np.array([func(par_col[i0:i1]) for i0, i1 in zip(i0s, i1s)]) out = par_col.__class__(vals) except Exception as err: raise TypeError( "Cannot aggregate column '{}' with type '{}': {}".format( par_col.info.name, par_col.info.dtype, err ) ) from err out_info = out.info for attr in ("name", "unit", "format", "description", "meta"): try: setattr(out_info, attr, getattr(par_col.info, attr)) except AttributeError: pass return out def filter(self, func): """ Filter groups in the Column based on evaluating function ``func`` on each group sub-table. The function which is passed to this method must accept one argument: - ``column`` : `Column` object It must then return either `True` or `False`. As an example, the following will select all column groups with only positive values:: def all_positive(column): if np.any(column < 0): return False return True Parameters ---------- func : function Filter function Returns ------- out : Column New column with the aggregated rows. """ mask = np.empty(len(self), dtype=bool) for i, group_column in enumerate(self): mask[i] = func(group_column) return self[mask] class TableGroups(BaseGroups): def __init__(self, parent_table, indices=None, keys=None): self.parent_table = parent_table # parent Table self._indices = indices self._keys = keys @property def key_colnames(self): """ Return the names of columns in the parent table that were used for grouping. """ # If the table was grouped by key columns *in* the table then treat those columns # differently in aggregation. In this case keys will be a Table with # keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we # need to handle this. 
grouped_by_table_cols = getattr(self.keys, "meta", {}).get( "grouped_by_table_cols", False ) return self.keys.colnames if grouped_by_table_cols else () @property def indices(self): if self._indices is None: return np.array([0, len(self.parent_table)]) else: return self._indices def aggregate(self, func): """ Aggregate each group in the Table into a single row by applying the reduction function ``func`` to group values in each column. Parameters ---------- func : function Function that reduces an array of values to a single value Returns ------- out : Table New table with the aggregated rows. """ i0s = self.indices[:-1] out_cols = [] parent_table = self.parent_table for col in parent_table.columns.values(): # For key columns just pick off first in each group since they are identical if col.info.name in self.key_colnames: new_col = col.take(i0s) else: try: new_col = col.info.groups.aggregate(func) except TypeError as err: warnings.warn(str(err), AstropyUserWarning) continue out_cols.append(new_col) return parent_table.__class__(out_cols, meta=parent_table.meta) def filter(self, func): """ Filter groups in the Table based on evaluating function ``func`` on each group sub-table. The function which is passed to this method must accept two arguments: - ``table`` : `Table` object - ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping It must then return either `True` or `False`. As an example, the following will select all table groups with only positive values in the non-key columns:: def all_positive(table, key_colnames): colnames = [name for name in table.colnames if name not in key_colnames] for colname in colnames: if np.any(table[colname] < 0): return False return True Parameters ---------- func : function Filter function Returns ------- out : Table New table with the aggregated rows. """ mask = np.empty(len(self), dtype=bool) key_colnames = self.key_colnames for i, group_table in enumerate(self): mask[i] = func(group_table, key_colnames) return self[mask] @property def keys(self): return self._keys
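
# Illustrative usage sketch (an addition, not part of the original module):
# grouping and aggregation built on table_group_by() and TableGroups above.
# The column names and values are hypothetical.
if __name__ == "__main__":
    import numpy as np
    from astropy.table import Table

    t = Table({'key': ['a', 'b', 'a', 'b'], 'val': [1.0, 2.0, 3.0, 4.0]})
    g = t.group_by('key')                        # sorts rows and sets .groups
    means = g.groups.aggregate(np.mean)          # one row per group; key column kept
    big = g.groups.filter(
        lambda tbl, keys: np.all(tbl['val'] > 1.5))  # keep only qualifying groups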
0e8b0a5b31293a302347c221ed6448c2f2402ce384b625aada5067ac94de25c2
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import sys import types import warnings import weakref from collections import OrderedDict, defaultdict from collections.abc import Mapping from copy import deepcopy import numpy as np from numpy import ma from astropy import log from astropy.io.registry import UnifiedReadWriteMethod from astropy.units import Quantity, QuantityInfo from astropy.utils import ShapedLikeNDArray, isiterable from astropy.utils.console import color_print from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.masked import Masked from astropy.utils.metadata import MetaAttribute, MetaData from . import conf, groups from .column import ( BaseColumn, Column, FalseArray, MaskedColumn, _auto_names, _convert_sequence_data_to_array, col_copy, ) from .connect import TableRead, TableWrite from .index import ( Index, SlicedIndex, TableILoc, TableIndices, TableLoc, TableLocIndices, _IndexModeContext, get_index, ) from .info import TableInfo from .mixins.registry import get_mixin_handler from .ndarray_mixin import NdarrayMixin # noqa: F401 from .pprint import TableFormatter from .row import Row _implementation_notes = """ This string has informal notes concerning Table implementation for developers. Things to remember: - Table has customizable attributes ColumnClass, Column, MaskedColumn. Table.Column is normally just column.Column (same w/ MaskedColumn) but in theory they can be different. Table.ColumnClass is the default class used to create new non-mixin columns, and this is a function of the Table.masked attribute. Column creation / manipulation in a Table needs to respect these. - Column objects that get inserted into the Table.columns attribute must have the info.parent_table attribute set correctly. Beware just dropping an object into the columns dict since an existing column may be part of another Table and have parent_table set to point at that table. Dropping that column into `columns` of this Table will cause a problem for the old one so the column object needs to be copied (but not necessarily the data). Currently replace_column is always making a copy of both object and data if parent_table is set. This could be improved but requires a generic way to copy a mixin object but not the data. - Be aware of column objects that have indices set. - `cls.ColumnClass` is a property that effectively uses the `masked` attribute to choose either `cls.Column` or `cls.MaskedColumn`. """ __doctest_skip__ = [ "Table.read", "Table.write", "Table._read", "Table.convert_bytestring_to_unicode", "Table.convert_unicode_to_bytestring", ] __doctest_requires__ = {"*pandas": ["pandas>=1.1"]} _pprint_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of lines in table output. max_width : int or None Maximum character width of output. show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. 
A list of strings can be provided for alignment of tables with multiple columns. """ _pformat_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is True. html : bool Format the output as an HTML table. Default is False. tableid : str or None An ID tag for the table; only used if html is set. Default is "table{id}", where id is the unique integer id of the table object, id(self) align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. A list of strings can be provided for alignment of tables with multiple columns. tableclass : str or list of str or None CSS classes for the table; only used if html is set. Default is None. Returns ------- lines : list Formatted table as a list of strings. """ class TableReplaceWarning(UserWarning): """ Warning class for cases when a table column is replaced via the Table.__setitem__ syntax e.g. t['a'] = val. This does not inherit from AstropyWarning because we want to use stacklevel=3 to show the user where the issue occurred in their code. """ pass def descr(col): """Array-interface compliant full description of a column. This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition. """ col_dtype = "O" if (col.info.dtype is None) else col.info.dtype col_shape = col.shape[1:] if hasattr(col, "shape") else () return (col.info.name, col_dtype, col_shape) def has_info_class(obj, cls): """Check if the object's info is an instance of cls.""" # We check info on the class of the instance, since on the instance # itself accessing 'info' has side effects in that it sets # obj.__dict__['info'] if it does not exist already. return isinstance(getattr(obj.__class__, "info", None), cls) def _get_names_from_list_of_dict(rows): """Return list of column names if ``rows`` is a list of dict that defines table data. If rows is not a list of dict then return None. """ if rows is None: return None names = set() for row in rows: if not isinstance(row, Mapping): return None names.update(row) return list(names) # Note to future maintainers: when transitioning this to dict # be sure to change the OrderedDict ref(s) in Row and in __len__(). class TableColumns(OrderedDict): """OrderedDict subclass for a set of columns. This class enhances item access to provide convenient access to columns by name or index, including slice access. It also handles renaming of columns. The initialization argument ``cols`` can be a list of ``Column`` objects or any structure that is valid for initializing a Python dict. This includes a dict, list of (key, val) tuples or [key, val] lists, etc. Parameters ---------- cols : dict, list, tuple; optional Column objects as data structure that can init dict (see above) """ def __init__(self, cols={}): if isinstance(cols, (list, tuple)): # `cols` should be a list of two-tuples, but it is allowed to have # columns (BaseColumn or mixins) in the list. 
newcols = [] for col in cols: if has_info_class(col, BaseColumnInfo): newcols.append((col.info.name, col)) else: newcols.append(col) cols = newcols super().__init__(cols) def __getitem__(self, item): """Get items from a TableColumns object. :: tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')]) tc['a'] # Column('a') tc[1] # Column('b') tc['a', 'b'] # <TableColumns names=('a', 'b')> tc[1:3] # <TableColumns names=('b', 'c')> """ if isinstance(item, str): return OrderedDict.__getitem__(self, item) elif isinstance(item, (int, np.integer)): return list(self.values())[item] elif ( isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i" ): return list(self.values())[item.item()] elif isinstance(item, tuple): return self.__class__([self[x] for x in item]) elif isinstance(item, slice): return self.__class__([self[x] for x in list(self)[item]]) else: raise IndexError( "Illegal key or index value for {} object".format( self.__class__.__name__ ) ) def __setitem__(self, item, value, validated=False): """ Set item in this dict instance, but do not allow directly replacing an existing column unless it is already validated (and thus is certain to not corrupt the table). NOTE: it is easily possible to corrupt a table by directly *adding* a new key to the TableColumns attribute of a Table, e.g. ``t.columns['jane'] = 'doe'``. """ if item in self and not validated: raise ValueError( "Cannot replace column '{}'. Use Table.replace_column() instead.".format( item ) ) super().__setitem__(item, value) def __repr__(self): names = (f"'{x}'" for x in self.keys()) return f"<{self.__class__.__name__} names=({','.join(names)})>" def _rename_column(self, name, new_name): if name == new_name: return if new_name in self: raise KeyError(f"Column {new_name} already exists") # Rename column names in pprint include/exclude attributes as needed parent_table = self[name].info.parent_table if parent_table is not None: parent_table.pprint_exclude_names._rename(name, new_name) parent_table.pprint_include_names._rename(name, new_name) mapper = {name: new_name} new_names = [mapper.get(name, name) for name in self] cols = list(self.values()) self.clear() self.update(list(zip(new_names, cols))) def __delitem__(self, name): # Remove column names from pprint include/exclude attributes as needed. # __delitem__ also gets called for pop() and popitem(). parent_table = self[name].info.parent_table if parent_table is not None: # _remove() method does not require that `name` is in the attribute parent_table.pprint_exclude_names._remove(name) parent_table.pprint_include_names._remove(name) return super().__delitem__(name) def isinstance(self, cls): """ Return a list of columns which are instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are instances of given classes. """ cols = [col for col in self.values() if isinstance(col, cls)] return cols def not_isinstance(self, cls): """ Return a list of columns which are not instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are not instances of given classes. 
""" cols = [col for col in self.values() if not isinstance(col, cls)] return cols class TableAttribute(MetaAttribute): """ Descriptor to define a custom attribute for a Table subclass. The value of the ``TableAttribute`` will be stored in a dict named ``__attributes__`` that is stored in the table ``meta``. The attribute can be accessed and set in the usual way, and it can be provided when creating the object. Defining an attribute by this mechanism ensures that it will persist if the table is sliced or serialized, for example as a pickle or ECSV file. See the `~astropy.utils.metadata.MetaAttribute` documentation for additional details. Parameters ---------- default : object Default value for attribute Examples -------- >>> from astropy.table import Table, TableAttribute >>> class MyTable(Table): ... identifier = TableAttribute(default=1) >>> t = MyTable(identifier=10) >>> t.identifier 10 >>> t.meta OrderedDict([('__attributes__', {'identifier': 10})]) """ class PprintIncludeExclude(TableAttribute): """Maintain tuple that controls table column visibility for print output. This is a descriptor that inherits from MetaAttribute so that the attribute value is stored in the table meta['__attributes__']. This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table attributes. """ def __get__(self, instance, owner_cls): """Get the attribute. This normally returns an instance of this class which is stored on the owner object. """ # For getting from class not an instance if instance is None: return self # If not already stored on `instance`, make a copy of the class # descriptor object and put it onto the instance. value = instance.__dict__.get(self.name) if value is None: value = deepcopy(self) instance.__dict__[self.name] = value # We set _instance_ref on every call, since if one makes copies of # instances, this attribute will be copied as well, which will lose the # reference. value._instance_ref = weakref.ref(instance) return value def __set__(self, instance, names): """Set value of ``instance`` attribute to ``names``. Parameters ---------- instance : object Instance that owns the attribute names : None, str, list, tuple Column name(s) to store, or None to clear """ if isinstance(names, str): names = [names] if names is None: # Remove attribute value from the meta['__attributes__'] dict. # Subsequent access will just return None. delattr(instance, self.name) else: # This stores names into instance.meta['__attributes__'] as tuple return super().__set__(instance, tuple(names)) def __call__(self): """Get the value of the attribute. Returns ------- names : None, tuple Include/exclude names """ # Get the value from instance.meta['__attributes__'] instance = self._instance_ref() return super().__get__(instance, instance.__class__) def __repr__(self): if hasattr(self, "_instance_ref"): out = f"<{self.__class__.__name__} name={self.name} value={self()}>" else: out = super().__repr__() return out def _add_remove_setup(self, names): """Common setup for add and remove. - Coerce attribute value to a list - Coerce names into a list - Get the parent table instance """ names = [names] if isinstance(names, str) else list(names) # Get the value. This is the same as self() but we need `instance` here. instance = self._instance_ref() value = super().__get__(instance, instance.__class__) value = [] if value is None else list(value) return instance, names, value def add(self, names): """Add ``names`` to the include/exclude attribute. 
Parameters ---------- names : str, list, tuple Column name(s) to add """ instance, names, value = self._add_remove_setup(names) value.extend(name for name in names if name not in value) super().__set__(instance, tuple(value)) def remove(self, names): """Remove ``names`` from the include/exclude attribute. Parameters ---------- names : str, list, tuple Column name(s) to remove """ self._remove(names, raise_exc=True) def _remove(self, names, raise_exc=False): """Remove ``names`` with optional checking if they exist""" instance, names, value = self._add_remove_setup(names) # Return now if there are no attributes and thus no action to be taken. if not raise_exc and "__attributes__" not in instance.meta: return # Remove one by one, optionally raising an exception if name is missing. for name in names: if name in value: value.remove(name) # Using the list.remove method elif raise_exc: raise ValueError(f"{name} not in {self.name}") # Change to either None or a tuple for storing back to attribute value = None if value == [] else tuple(value) self.__set__(instance, value) def _rename(self, name, new_name): """Rename ``name`` to ``new_name`` if ``name`` is in the list""" names = self() or () if name in names: new_names = list(names) new_names[new_names.index(name)] = new_name self.set(new_names) def set(self, names): """Set value of include/exclude attribute to ``names``. Parameters ---------- names : None, str, list, tuple Column name(s) to store, or None to clear """ class _Context: def __init__(self, descriptor_self): self.descriptor_self = descriptor_self self.names_orig = descriptor_self() def __enter__(self): pass def __exit__(self, type, value, tb): descriptor_self = self.descriptor_self instance = descriptor_self._instance_ref() descriptor_self.__set__(instance, self.names_orig) def __repr__(self): return repr(self.descriptor_self) ctx = _Context(descriptor_self=self) instance = self._instance_ref() self.__set__(instance, names) return ctx class Table: """A class to represent tables of heterogeneous data. `~astropy.table.Table` provides a class for heterogeneous tabular data. A key enhancement provided by the `~astropy.table.Table` class over e.g. a `numpy` structured array is the ability to easily modify the structure of the table by adding or removing columns, or adding new rows of data. In addition table and column metadata are fully supported. `~astropy.table.Table` differs from `~astropy.nddata.NDData` by the assumption that the input data consists of columns of homogeneous data, where each column has a unique identifier and may contain additional metadata such as the data unit, format, and description. See also: https://docs.astropy.org/en/stable/table/ Parameters ---------- data : numpy ndarray, dict, list, table-like object, optional Data to initialize table. masked : bool, optional Specify whether the table is masked. names : list, optional Specify column names. dtype : list, optional Specify column data types. meta : dict, optional Metadata associated with the table. copy : bool, optional Copy the input data. If the input is a Table the ``meta`` is always copied regardless of the ``copy`` parameter. Default is True. rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. units : list, dict, optional List or dict of units to apply to columns. descriptions : list, dict, optional List or dict of descriptions to apply to columns. 
**kwargs : dict, optional Additional keyword args when converting table-like object. """ meta = MetaData(copy=False) # Define class attributes for core container objects to allow for subclass # customization. Row = Row Column = Column MaskedColumn = MaskedColumn TableColumns = TableColumns TableFormatter = TableFormatter # Unified I/O read and write methods from .connect read = UnifiedReadWriteMethod(TableRead) write = UnifiedReadWriteMethod(TableWrite) pprint_exclude_names = PprintIncludeExclude() pprint_include_names = PprintIncludeExclude() def as_array(self, keep_byteorder=False, names=None): """ Return a new copy of the table in the form of a structured np.ndarray or np.ma.MaskedArray object (as appropriate). Parameters ---------- keep_byteorder : bool, optional By default the returned array has all columns in native byte order. However, if this option is `True` this preserves the byte order of all columns (if any are non-native). names : list, optional: List of column names to include for returned structured array. Default is to include all table columns. Returns ------- table_array : array or `~numpy.ma.MaskedArray` Copy of table as a numpy structured array. ndarray for unmasked or `~numpy.ma.MaskedArray` for masked. """ masked = self.masked or self.has_masked_columns or self.has_masked_values empty_init = ma.empty if masked else np.empty if len(self.columns) == 0: return empty_init(0, dtype=None) dtype = [] cols = self.columns.values() if names is not None: cols = [col for col in cols if col.info.name in names] for col in cols: col_descr = descr(col) if not (col.info.dtype.isnative or keep_byteorder): new_dt = np.dtype(col_descr[1]).newbyteorder("=") col_descr = (col_descr[0], new_dt, col_descr[2]) dtype.append(col_descr) data = empty_init(len(self), dtype=dtype) for col in cols: # When assigning from one array into a field of a structured array, # Numpy will automatically swap those columns to their destination # byte order where applicable data[col.info.name] = col # For masked out, masked mixin columns need to set output mask attribute. if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"): data[col.info.name].mask = col.mask return data def __init__( self, data=None, masked=False, names=None, dtype=None, meta=None, copy=True, rows=None, copy_indices=True, units=None, descriptions=None, **kwargs, ): # Set up a placeholder empty table self._set_masked(masked) self.columns = self.TableColumns() self.formatter = self.TableFormatter() self._copy_indices = True # copy indices from this Table by default self._init_indices = copy_indices # whether to copy indices in init self.primary_key = None # Must copy if dtype are changing if not copy and dtype is not None: raise ValueError("Cannot specify dtype when copy=False") # Specifies list of names found for the case of initializing table with # a list of dict. If data are not list of dict then this is None. names_from_list_of_dict = None # Row-oriented input, e.g. list of lists or list of tuples, list of # dict, Row instance. Set data to something that the subsequent code # will parse correctly. if rows is not None: if data is not None: raise ValueError("Cannot supply both `data` and `rows` values") if isinstance(rows, types.GeneratorType): # Without this then the all(..) 
test below uses up the generator rows = list(rows) # Get column names if `rows` is a list of dict, otherwise this is None names_from_list_of_dict = _get_names_from_list_of_dict(rows) if names_from_list_of_dict: data = rows elif isinstance(rows, self.Row): data = rows else: data = list(zip(*rows)) # Infer the type of the input data and set up the initialization # function, number of columns, and potentially the default col names default_names = None # Handle custom (subclass) table attributes that are stored in meta. # These are defined as class attributes using the TableAttribute # descriptor. Any such attributes get removed from kwargs here and # stored for use after the table is otherwise initialized. Any values # provided via kwargs will have precedence over existing values from # meta (e.g. from data as a Table or meta via kwargs). meta_table_attrs = {} if kwargs: for attr in list(kwargs): descr = getattr(self.__class__, attr, None) if isinstance(descr, TableAttribute): meta_table_attrs[attr] = kwargs.pop(attr) if hasattr(data, "__astropy_table__"): # Data object implements the __astropy_table__ interface method. # Calling that method returns an appropriate instance of # self.__class__ and respects the `copy` arg. The returned # Table object should NOT then be copied. data = data.__astropy_table__(self.__class__, copy, **kwargs) copy = False elif kwargs: raise TypeError( "__init__() got unexpected keyword argument {!r}".format( list(kwargs.keys())[0] ) ) if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names: data = None if isinstance(data, self.Row): data = data._table[data._index : data._index + 1] if isinstance(data, (list, tuple)): # Get column names from `data` if it is a list of dict, otherwise this is None. # This might be previously defined if `rows` was supplied as an init arg. names_from_list_of_dict = ( names_from_list_of_dict or _get_names_from_list_of_dict(data) ) if names_from_list_of_dict: init_func = self._init_from_list_of_dicts n_cols = len(names_from_list_of_dict) else: init_func = self._init_from_list n_cols = len(data) elif isinstance(data, np.ndarray): if data.dtype.names: init_func = self._init_from_ndarray # _struct n_cols = len(data.dtype.names) default_names = data.dtype.names else: init_func = self._init_from_ndarray # _homog if data.shape == (): raise ValueError("Can not initialize a Table with a scalar") elif len(data.shape) == 1: data = data[np.newaxis, :] n_cols = data.shape[1] elif isinstance(data, Mapping): init_func = self._init_from_dict default_names = list(data) n_cols = len(default_names) elif isinstance(data, Table): # If user-input meta is None then use data.meta (if non-trivial) if meta is None and data.meta: # At this point do NOT deepcopy data.meta as this will happen after # table init_func() is called. But for table input the table meta # gets a key copy here if copy=False because later a direct object ref # is used. meta = data.meta if copy else data.meta.copy() # Handle indices on input table. Copy primary key and don't copy indices # if the input Table is in non-copy mode. self.primary_key = data.primary_key self._init_indices = self._init_indices and data._copy_indices # Extract default names, n_cols, and then overwrite ``data`` to be the # table columns so we can use _init_from_list. default_names = data.colnames n_cols = len(default_names) data = list(data.columns.values()) init_func = self._init_from_list elif data is None: if names is None: if dtype is None: # Table was initialized as `t = Table()`. 
Set up for empty # table with names=[], data=[], and n_cols=0. # self._init_from_list() will simply return, giving the # expected empty table. names = [] else: try: # No data nor names but dtype is available. This must be # valid to initialize a structured array. dtype = np.dtype(dtype) names = dtype.names dtype = [dtype[name] for name in names] except Exception: raise ValueError( "dtype was specified but could not be " "parsed for column names" ) # names is guaranteed to be set at this point init_func = self._init_from_list n_cols = len(names) data = [[]] * n_cols else: raise ValueError(f"Data type {type(data)} not allowed to init Table") # Set up defaults if names and/or dtype are not specified. # A value of None means the actual value will be inferred # within the appropriate initialization routine, either from # existing specification or auto-generated. if dtype is None: dtype = [None] * n_cols elif isinstance(dtype, np.dtype): if default_names is None: default_names = dtype.names # Convert a numpy dtype input to a list of dtypes for later use. dtype = [dtype[name] for name in dtype.names] if names is None: names = default_names or [None] * n_cols names = [None if name is None else str(name) for name in names] self._check_names_dtype(names, dtype, n_cols) # Finally do the real initialization init_func(data, names, dtype, n_cols, copy) # Set table meta. If copy=True then deepcopy meta otherwise use the # user-supplied meta directly. if meta is not None: self.meta = deepcopy(meta) if copy else meta # Update meta with TableAttributes supplied as kwargs in Table init. # This takes precedence over previously-defined meta. if meta_table_attrs: for attr, value in meta_table_attrs.items(): setattr(self, attr, value) # Whatever happens above, the masked property should be set to a boolean if self.masked not in (None, True, False): raise TypeError("masked property must be None, True or False") self._set_column_attribute("unit", units) self._set_column_attribute("description", descriptions) def _set_column_attribute(self, attr, values): """Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column name) or a dict of name: value pairs. This is used for handling the ``units`` and ``descriptions`` kwargs to ``__init__``. """ if not values: return if isinstance(values, Row): # For a Row object transform to an equivalent dict. 
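# Illustrative sketch of the accepted forms (comments only, not executed):
#   Table([[1, 2]], names=['a'], units={'a': 'm'})   # dict keyed by column name
#   Table([[1, 2]], names=['a'], units=['m'])        # sequence in column order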
values = {name: values[name] for name in values.colnames} if not isinstance(values, Mapping): # If not a dict map, assume iterable and map to dict if the right length if len(values) != len(self.columns): raise ValueError( f"sequence of {attr} values must match number of columns" ) values = dict(zip(self.colnames, values)) for name, value in values.items(): if name not in self.columns: raise ValueError( f"invalid column name {name} for setting {attr} attribute" ) # Special case: ignore unit if it is an empty or blank string if attr == "unit" and isinstance(value, str): if value.strip() == "": value = None if value not in (np.ma.masked, None): setattr(self[name].info, attr, value) def __getstate__(self): columns = OrderedDict( (key, col if isinstance(col, BaseColumn) else col_copy(col)) for key, col in self.columns.items() ) return (columns, self.meta) def __setstate__(self, state): columns, meta = state self.__init__(columns, meta=meta) @property def mask(self): # Dynamic view of available masks if self.masked or self.has_masked_columns or self.has_masked_values: mask_table = Table( [ getattr(col, "mask", FalseArray(col.shape)) for col in self.itercols() ], names=self.colnames, copy=False, ) # Set hidden attribute to force inplace setitem so that code like # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask. # See #5556 for discussion. mask_table._setitem_inplace = True else: mask_table = None return mask_table @mask.setter def mask(self, val): self.mask[:] = val @property def _mask(self): """This is needed so that comparison of a masked Table and a MaskedArray works. The requirement comes from numpy.ma.core so don't remove this property.""" return self.as_array().mask def filled(self, fill_value=None): """Return copy of self, with masked values filled. If input ``fill_value`` supplied then that value is used for all masked entries in the table. Otherwise the individual ``fill_value`` defined for each table column is used. Parameters ---------- fill_value : str If supplied, this ``fill_value`` is used for all masked entries in the entire table. Returns ------- filled_table : `~astropy.table.Table` New table with masked values filled """ if self.masked or self.has_masked_columns or self.has_masked_values: # Get new columns with masked values filled, then create Table with those # new cols (copy=False) but deepcopy the meta. data = [ col.filled(fill_value) if hasattr(col, "filled") else col for col in self.itercols() ] return self.__class__(data, meta=deepcopy(self.meta), copy=False) else: # Return copy of the original object. return self.copy() @property def indices(self): """ Return the indices associated with columns of the table as a TableIndices object. """ lst = [] for column in self.columns.values(): for index in column.info.indices: if sum(index is x for x in lst) == 0: # ensure uniqueness lst.append(index) return TableIndices(lst) @property def loc(self): """ Return a TableLoc object that can be used for retrieving rows by index in a given data range. Note that both loc and iloc work only with single-column indices. """ return TableLoc(self) @property def loc_indices(self): """ Return a TableLocIndices object that can be used for retrieving the row indices corresponding to given table index key value or values. """ return TableLocIndices(self) @property def iloc(self): """ Return a TableILoc object that can be used for retrieving indexed rows in the order they appear in the index. 
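A minimal usage sketch (the column name and slice are illustrative; an index
must exist first, e.g. via ``add_index``)::

    >>> t = Table({'a': [3, 1, 2]})
    >>> t.add_index('a')
    >>> len(t.iloc[0:2])
    2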
""" return TableILoc(self) def add_index(self, colnames, engine=None, unique=False): """ Insert a new index among one or more columns. If there are no indices, make this index the primary table index. Parameters ---------- colnames : str or list List of column names (or a single column name) to index engine : type or None Indexing engine class to use, either `~astropy.table.SortedArray`, `~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied argument is None (by default), use `~astropy.table.SortedArray`. unique : bool Whether the values of the index must be unique. Default is False. """ if isinstance(colnames, str): colnames = (colnames,) columns = self.columns[tuple(colnames)].values() # make sure all columns support indexing for col in columns: if not getattr(col.info, "_supports_indexing", False): raise ValueError( 'Cannot create an index on column "{}", of type "{}"'.format( col.info.name, type(col) ) ) is_primary = not self.indices index = Index(columns, engine=engine, unique=unique) sliced_index = SlicedIndex(index, slice(0, 0, None), original=True) if is_primary: self.primary_key = colnames for col in columns: col.info.indices.append(sliced_index) def remove_indices(self, colname): """ Remove all indices involving the given column. If the primary index is removed, the new primary index will be the most recently added remaining index. Parameters ---------- colname : str Name of column """ col = self.columns[colname] for index in self.indices: try: index.col_position(col.info.name) except ValueError: pass else: for c in index.columns: c.info.indices.remove(index) def index_mode(self, mode): """ Return a context manager for an indexing mode. Parameters ---------- mode : str Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. In 'discard_on_copy' mode, indices are not copied whenever columns or tables are copied. In 'freeze' mode, indices are not modified whenever columns are modified; at the exit of the context, indices refresh themselves based on column values. This mode is intended for scenarios in which one intends to make many additions or modifications in an indexed column. In 'copy_on_getitem' mode, indices are copied when taking column slices as well as table slices, so col[i0:i1] will preserve indices. """ return _IndexModeContext(self, mode) def __array__(self, dtype=None): """Support converting Table to np.array via np.array(table). Coercion to a different dtype via np.array(table, dtype) is not supported and will raise a ValueError. """ if dtype is not None: if np.dtype(dtype) != object: raise ValueError("Datatype coercion is not allowed") out = np.array(None, dtype=object) out[()] = self return out # This limitation is because of the following unexpected result that # should have made a table copy while changing the column names. # # >>> d = astropy.table.Table([[1,2],[3,4]]) # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')]) # array([(0, 0), (0, 0)], # dtype=[('a', '<i8'), ('b', '<i8')]) out = self.as_array() return out.data if isinstance(out, np.ma.MaskedArray) else out def _check_names_dtype(self, names, dtype, n_cols): """Make sure that names and dtype are both iterable and have the same length as data. 
""" for inp_list, inp_str in ((dtype, "dtype"), (names, "names")): if not isiterable(inp_list): raise ValueError(f"{inp_str} must be a list or None") if len(names) != n_cols or len(dtype) != n_cols: raise ValueError( 'Arguments "names" and "dtype" must match number of columns' ) def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy): """Initialize table from a list of dictionaries representing rows.""" # Define placeholder for missing values as a unique object that cannot # every occur in user data. MISSING = object() # Gather column names that exist in the input `data`. names_from_data = set() for row in data: names_from_data.update(row) if set(data[0].keys()) == names_from_data: names_from_data = list(data[0].keys()) else: names_from_data = sorted(names_from_data) # Note: if set(data[0].keys()) != names_from_data, this will give an # exception later, so NO need to catch here. # Convert list of dict into dict of list (cols), keep track of missing # indexes and put in MISSING placeholders in the `cols` lists. cols = {} missing_indexes = defaultdict(list) for name in names_from_data: cols[name] = [] for ii, row in enumerate(data): try: val = row[name] except KeyError: missing_indexes[name].append(ii) val = MISSING cols[name].append(val) # Fill the missing entries with first values if missing_indexes: for name, indexes in missing_indexes.items(): col = cols[name] first_val = next(val for val in col if val is not MISSING) for index in indexes: col[index] = first_val # prepare initialization if all(name is None for name in names): names = names_from_data self._init_from_dict(cols, names, dtype, n_cols, copy) # Mask the missing values if necessary, converting columns to MaskedColumn # as needed. if missing_indexes: for name, indexes in missing_indexes.items(): col = self[name] # Ensure that any Column subclasses with MISSING values can support # setting masked values. As of astropy 4.0 the test condition below is # always True since _init_from_dict cannot result in mixin columns. if isinstance(col, Column) and not isinstance(col, MaskedColumn): self[name] = self.MaskedColumn(col, copy=False) # Finally do the masking in a mixin-safe way. self[name][indexes] = np.ma.masked return def _init_from_list(self, data, names, dtype, n_cols, copy): """Initialize table from a list of column data. A column can be a Column object, np.ndarray, mixin, or any other iterable object. """ # Special case of initializing an empty table like `t = Table()`. No # action required at this point. if n_cols == 0: return cols = [] default_names = _auto_names(n_cols) for col, name, default_name, dtype in zip(data, names, default_names, dtype): col = self._convert_data_to_col(col, copy, default_name, dtype, name) cols.append(col) self._init_from_cols(cols) def _convert_data_to_col( self, data, copy=True, default_name=None, dtype=None, name=None ): """ Convert any allowed sequence data ``col`` to a column object that can be used directly in the self.columns dict. This could be a Column, MaskedColumn, or mixin column. The final column name is determined by:: name or data.info.name or def_name If ``data`` has no ``info`` then ``name = name or def_name``. 
The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- data : object (column-like sequence) Input column data copy : bool Make a copy default_name : str Default name dtype : np.dtype or None Data dtype name : str or None Column name Returns ------- col : Column, MaskedColumn, mixin-column type Object that can be used as a column in self """ data_is_mixin = self._is_mixin_for_table(data) masked_col_cls = ( self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn ) try: data0_is_mixin = self._is_mixin_for_table(data[0]) except Exception: # Need broad exception, cannot predict what data[0] raises for arbitrary data data0_is_mixin = False # If the data is not an instance of Column or a mixin class, we can # check the registry of mixin 'handlers' to see if the column can be # converted to a mixin class if (handler := get_mixin_handler(data)) is not None: original_data = data data = handler(data) if not (data_is_mixin := self._is_mixin_for_table(data)): fully_qualified_name = ( original_data.__class__.__module__ + "." + original_data.__class__.__name__ ) raise TypeError( "Mixin handler for object of type " f"{fully_qualified_name} " "did not return a valid mixin column" ) # Get the final column name using precedence. Some objects may not # have an info attribute. Also avoid creating info as a side effect. if not name: if isinstance(data, Column): name = data.name or default_name elif "info" in getattr(data, "__dict__", ()): name = data.info.name or default_name else: name = default_name if isinstance(data, Column): # If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass, # otherwise just use the original class. The most common case is a # table with masked=True and ColumnClass=MaskedColumn. Then a Column # gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior # of downgrading from MaskedColumn to Column (for non-masked table) # does not happen. col_cls = self._get_col_cls_for_table(data) elif data_is_mixin: # Copy the mixin column attributes if they exist since the copy below # may not get this attribute. If not copying, take a slice # to ensure we get a new instance and we do not share metadata # like info. col = col_copy(data, copy_indices=self._init_indices) if copy else data[:] col.info.name = name return col elif data0_is_mixin: # Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m]. try: col = data[0].__class__(data) col.info.name = name return col except Exception: # If that didn't work for some reason, just turn it into np.array of object data = np.array(data, dtype=object) col_cls = self.ColumnClass elif isinstance(data, (np.ma.MaskedArray, Masked)): # Require that col_cls be a subclass of MaskedColumn, remembering # that ColumnClass could be a user-defined subclass (though more-likely # could be MaskedColumn). col_cls = masked_col_cls elif data is None: # Special case for data passed as the None object (for broadcasting # to an object column). Need to turn data into numpy `None` scalar # object, otherwise `Column` interprets data=None as no data instead # of a object column of `None`. 
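# Illustrative intent (comment only): on a length-3 table, t['flag'] = None should
# broadcast to an object column of three None values rather than meaning "no data".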
data = np.array(None) col_cls = self.ColumnClass elif not hasattr(data, "dtype"): # `data` is none of the above, convert to numpy array or MaskedArray # assuming only that it is a scalar or sequence or N-d nested # sequence. This function is relatively intricate and tries to # maintain performance for common cases while handling things like # list input with embedded np.ma.masked entries. If `data` is a # scalar then it gets returned unchanged so the original object gets # passed to `Column` later. data = _convert_sequence_data_to_array(data, dtype) copy = False # Already made a copy above col_cls = ( masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass ) else: col_cls = self.ColumnClass try: col = col_cls( name=name, data=data, dtype=dtype, copy=copy, copy_indices=self._init_indices, ) except Exception: # Broad exception class since we don't know what might go wrong raise ValueError("unable to convert data to Column for Table") col = self._convert_col_for_table(col) return col def _init_from_ndarray(self, data, names, dtype, n_cols, copy): """Initialize table from an ndarray structured array""" data_names = data.dtype.names or _auto_names(n_cols) struct = data.dtype.names is not None names = [name or data_names[i] for i, name in enumerate(names)] cols = ( [data[name] for name in data_names] if struct else [data[:, i] for i in range(n_cols)] ) self._init_from_list(cols, names, dtype, n_cols, copy) def _init_from_dict(self, data, names, dtype, n_cols, copy): """Initialize table from a dictionary of columns""" data_list = [data[name] for name in names] self._init_from_list(data_list, names, dtype, n_cols, copy) def _get_col_cls_for_table(self, col): """Get the correct column class to use for upgrading any Column-like object. For a masked table, ensure any Column-like object is a subclass of the table MaskedColumn. For unmasked table, ensure any MaskedColumn-like object is a subclass of the table MaskedColumn. If not a MaskedColumn, then ensure that any Column-like object is a subclass of the table Column. """ col_cls = col.__class__ if self.masked: if isinstance(col, Column) and not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn else: if isinstance(col, MaskedColumn): if not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn elif isinstance(col, Column) and not isinstance(col, self.Column): col_cls = self.Column return col_cls def _convert_col_for_table(self, col): """ Make sure that all Column objects have correct base class for this type of Table. For a base Table this most commonly means setting to MaskedColumn if the table is masked. Table subclasses like QTable override this method. """ if isinstance(col, Column) and not isinstance(col, self.ColumnClass): col_cls = self._get_col_cls_for_table(col) if col_cls is not col.__class__: col = col_cls(col, copy=False) return col def _init_from_cols(self, cols): """Initialize table from a list of Column or mixin objects""" lengths = {len(col) for col in cols} if len(lengths) > 1: raise ValueError(f"Inconsistent data column lengths: {lengths}") # Make sure that all Column-based objects have correct class. For # plain Table this is self.ColumnClass, but for instance QTable will # convert columns with units to a Quantity mixin. newcols = [self._convert_col_for_table(col) for col in cols] self._make_table_from_cols(self, newcols) # Deduplicate indices. 
It may happen that after pickling or when # initing from an existing table that column indices which had been # references to a single index object got *copied* into an independent # object. This results in duplicates which will cause downstream problems. index_dict = {} for col in self.itercols(): for i, index in enumerate(col.info.indices or []): names = tuple(ind_col.info.name for ind_col in index.columns) if names in index_dict: col.info.indices[i] = index_dict[names] else: index_dict[names] = index def _new_from_slice(self, slice_): """Create a new table as a referenced slice from self.""" table = self.__class__(masked=self.masked) if self.meta: table.meta = self.meta.copy() # Shallow copy for slice table.primary_key = self.primary_key newcols = [] for col in self.columns.values(): newcol = col[slice_] # Note in line below, use direct attribute access to col.indices for Column # instances instead of the generic col.info.indices. This saves about 4 usec # per column. if (col if isinstance(col, Column) else col.info).indices: # TODO : as far as I can tell the only purpose of setting _copy_indices # here is to communicate that to the initial test in `slice_indices`. # Why isn't that just sent as an arg to the function? col.info._copy_indices = self._copy_indices newcol = col.info.slice_indices(newcol, slice_, len(col)) # Don't understand why this is forcing a value on the original column. # Normally col.info does not even have a _copy_indices attribute. Tests # still pass if this line is deleted. (Each col.info attribute access # is expensive). col.info._copy_indices = True newcols.append(newcol) self._make_table_from_cols( table, newcols, verify=False, names=self.columns.keys() ) return table @staticmethod def _make_table_from_cols(table, cols, verify=True, names=None): """ Make ``table`` in-place so that it represents the given list of ``cols``. """ if names is None: names = [col.info.name for col in cols] # Note: we do not test for len(names) == len(cols) if names is not None. In that # case the function is being called by from "trusted" source (e.g. right above here) # that is assumed to provide valid inputs. In that case verify=False. if verify: if None in names: raise TypeError("Cannot have None for column name") if len(set(names)) != len(names): raise ValueError("Duplicate column names") table.columns = table.TableColumns( (name, col) for name, col in zip(names, cols) ) for col in cols: table._set_col_parent_table_and_mask(col) def _set_col_parent_table_and_mask(self, col): """ Set ``col.parent_table = self`` and force ``col`` to have ``mask`` attribute if the table is masked and ``col.mask`` does not exist. """ # For Column instances it is much faster to do direct attribute access # instead of going through .info col_info = col if isinstance(col, Column) else col.info col_info.parent_table = self # Legacy behavior for masked table if self.masked and not hasattr(col, "mask"): col.mask = FalseArray(col.shape) def itercols(self): """ Iterate over the columns of this table. Examples -------- To iterate over the columns of a table:: >>> t = Table([[1], [2]]) >>> for col in t.itercols(): ... print(col) col0 ---- 1 col1 ---- 2 Using ``itercols()`` is similar to ``for col in t.columns.values()`` but is syntactically preferred. 
""" for colname in self.columns: yield self[colname] def _base_repr_( self, html=False, descr_vals=None, max_width=None, tableid=None, show_dtype=True, max_lines=None, tableclass=None, ): if descr_vals is None: descr_vals = [self.__class__.__name__] if self.masked: descr_vals.append("masked=True") descr_vals.append(f"length={len(self)}") descr = " ".join(descr_vals) if html: from astropy.utils.xml.writer import xml_escape descr = f"<i>{xml_escape(descr)}</i>\n" else: descr = f"<{descr}>\n" if tableid is None: tableid = f"table{id(self)}" data_lines, outs = self.formatter._pformat_table( self, tableid=tableid, html=html, max_width=max_width, show_name=True, show_unit=None, show_dtype=show_dtype, max_lines=max_lines, tableclass=tableclass, ) out = descr + "\n".join(data_lines) return out def _repr_html_(self): out = self._base_repr_( html=True, max_width=-1, tableclass=conf.default_notebook_table_class ) # Wrap <table> in <div>. This follows the pattern in pandas and allows # table to be scrollable horizontally in VS Code notebook display. out = f"<div>{out}</div>" return out def __repr__(self): return self._base_repr_(html=False, max_width=None) def __str__(self): return "\n".join(self.pformat()) def __bytes__(self): return str(self).encode("utf-8") @property def has_mixin_columns(self): """ True if table has any mixin columns (defined as columns that are not Column subclasses). """ return any(has_info_class(col, MixinInfo) for col in self.columns.values()) @property def has_masked_columns(self): """True if table has any ``MaskedColumn`` columns. This does not check for mixin columns that may have masked values, use the ``has_masked_values`` property in that case. """ return any(isinstance(col, MaskedColumn) for col in self.itercols()) @property def has_masked_values(self): """True if column in the table has values which are masked. This may be relatively slow for large tables as it requires checking the mask values of each column. """ for col in self.itercols(): if hasattr(col, "mask") and np.any(col.mask): return True else: return False def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ if isinstance(col, BaseColumn): return False # Is it a mixin but not [Masked]Quantity (which gets converted to # [Masked]Column with unit set). return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo) @format_doc(_pprint_docs) def pprint( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, align=None, ): """Print a formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for max_width except the configuration item is ``astropy.conf.max_width``. 
""" lines, outs = self.formatter._pformat_table( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, align=align, ) if outs["show_length"]: lines.append(f"Length = {len(self)} rows") n_header = outs["n_header"] for i, line in enumerate(lines): if i < n_header: color_print(line, "red") else: print(line) @format_doc(_pprint_docs) def pprint_all( self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, align=None, ): """Print a formatted string representation of the entire table. This method is the same as `astropy.table.Table.pprint` except that the default ``max_lines`` and ``max_width`` are both -1 so that by default the entire table is printed instead of restricting to the size of the screen terminal. """ return self.pprint( max_lines, max_width, show_name, show_unit, show_dtype, align ) def _make_index_row_display_table(self, index_row_name): if index_row_name not in self.columns: idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self))) return self.__class__([idx_col] + list(self.columns.values()), copy=False) else: return self def show_in_notebook( self, tableid=None, css=None, display_length=50, table_class="astropy-default", show_row_index="idx", ): """Render the table in HTML and show it in the IPython notebook. Parameters ---------- tableid : str or None An html ID tag for the table. Default is ``table{id}-XXX``, where id is the unique integer id of the table object, id(self), and XXX is a random number to avoid conflicts when printing the same table multiple times. table_class : str or None A string with a list of HTML classes used to style the table. The special default string ('astropy-default') means that the string will be retrieved from the configuration item ``astropy.table.default_notebook_table_class``. Note that these table classes may make use of bootstrap, as this is loaded with the notebook. See `this page <https://getbootstrap.com/css/#tables>`_ for the list of classes. css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS_NB``. display_length : int, optional Number or rows to show. Defaults to 50. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". Notes ----- Currently, unlike `show_in_browser` (with ``jsviewer=True``), this method needs to access online javascript code repositories. This is due to modern browsers' limitations on accessing local files. Hence, if you call this method while offline (and don't have a cached version of jquery and jquery.dataTables), you will not get the jsviewer features. 
""" from IPython.display import HTML from .jsviewer import JSViewer if tableid is None: tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}" jsv = JSViewer(display_length=display_length) if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self if table_class == "astropy-default": table_class = conf.default_notebook_table_class html = display_table._base_repr_( html=True, max_width=-1, tableid=tableid, max_lines=-1, show_dtype=False, tableclass=table_class, ) columns = display_table.columns.values() sortable_columns = [ i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc" ] html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns) return HTML(html) def show_in_browser( self, max_lines=5000, jsviewer=False, browser="default", jskwargs={"use_local_files": True}, tableid=None, table_class="display compact", css=None, show_row_index="idx", ): """Render the table in HTML and show it in a web browser. Parameters ---------- max_lines : int Maximum number of rows to export to the table (set low by default to avoid memory issues, since the browser view requires duplicating the table in memory). A negative value of ``max_lines`` indicates no row limit. jsviewer : bool If `True`, prepends some javascript headers so that the table is rendered as a `DataTables <https://datatables.net>`_ data table. This allows in-browser searching & sorting. browser : str Any legal browser name, e.g. ``'firefox'``, ``'chrome'``, ``'safari'`` (for mac, you may need to use ``'open -a "/Applications/Google Chrome.app" {}'`` for Chrome). If ``'default'``, will use the system default browser. jskwargs : dict Passed to the `astropy.table.JSViewer` init. Defaults to ``{'use_local_files': True}`` which means that the JavaScript libraries will be served from local copies. tableid : str or None An html ID tag for the table. Default is ``table{id}``, where id is the unique integer id of the table object, id(self). table_class : str or None A string with a list of HTML classes used to style the table. Default is "display compact", and other possible values can be found in https://www.datatables.net/manual/styling/classes css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS``. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". """ import os import tempfile import webbrowser from urllib.parse import urljoin from urllib.request import pathname2url from .jsviewer import DEFAULT_CSS if css is None: css = DEFAULT_CSS # We can't use NamedTemporaryFile here because it gets deleted as # soon as it gets garbage collected. 
tmpdir = tempfile.mkdtemp() path = os.path.join(tmpdir, "table.html") with open(path, "w") as tmp: if jsviewer: if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self display_table.write( tmp, format="jsviewer", css=css, max_lines=max_lines, jskwargs=jskwargs, table_id=tableid, table_class=table_class, ) else: self.write(tmp, format="html") try: br = webbrowser.get(None if browser == "default" else browser) except webbrowser.Error: log.error(f"Browser '{browser}' not found.") else: br.open(urljoin("file:", pathname2url(path))) @format_doc(_pformat_docs, id="{id}") def pformat( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None, ): """Return a list of lines for the formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ lines, outs = self.formatter._pformat_table( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, html=html, tableid=tableid, tableclass=tableclass, align=align, ) if outs["show_length"]: lines.append(f"Length = {len(self)} rows") return lines @format_doc(_pformat_docs, id="{id}") def pformat_all( self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None, ): """Return a list of lines for the formatted string representation of the entire table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ return self.pformat( max_lines, max_width, show_name, show_unit, show_dtype, html, tableid, align, tableclass, ) def more( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, ): """Interactively browse table with a paging interface. Supported keys:: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help Parameters ---------- max_lines : int Maximum number of lines in table output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. 
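Examples
--------
A minimal sketch (interactive paging, so not executed as a doctest; the data
are illustrative)::

    >>> t = Table({'a': list(range(100))})  # doctest: +SKIP
    >>> t.more(max_lines=10)  # doctest: +SKIP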
""" self.formatter._more_tabcol( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, ) def __getitem__(self, item): if isinstance(item, str): return self.columns[item] elif isinstance(item, (int, np.integer)): return self.Row(self, item) elif ( isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i" ): return self.Row(self, item.item()) elif self._is_list_or_tuple_of_str(item): out = self.__class__( [self[x] for x in item], copy_indices=self._copy_indices ) out._groups = groups.TableGroups( out, indices=self.groups._indices, keys=self.groups._keys ) out.meta = self.meta.copy() # Shallow copy for meta return out elif (isinstance(item, np.ndarray) and item.size == 0) or ( isinstance(item, (tuple, list)) and not item ): # If item is an empty array/list/tuple then return the table with no rows return self._new_from_slice([]) elif ( isinstance(item, slice) or isinstance(item, np.ndarray) or isinstance(item, list) or isinstance(item, tuple) and all(isinstance(x, np.ndarray) for x in item) ): # here for the many ways to give a slice; a tuple of ndarray # is produced by np.where, as in t[np.where(t['a'] > 2)] # For all, a new table is constructed with slice of all columns return self._new_from_slice(item) else: raise ValueError(f"Illegal type {type(item)} for table item access") def __setitem__(self, item, value): # If the item is a string then it must be the name of a column. # If that column doesn't already exist then create it now. if isinstance(item, str) and item not in self.colnames: self.add_column(value, name=item, copy=True) else: n_cols = len(self.columns) if isinstance(item, str): # Set an existing column by first trying to replace, and if # this fails do an in-place update. See definition of mask # property for discussion of the _setitem_inplace attribute. 
if ( not getattr(self, "_setitem_inplace", False) and not conf.replace_inplace ): try: self._replace_column_warnings(item, value) return except Exception: pass self.columns[item][:] = value elif isinstance(item, (int, np.integer)): self._set_row(idx=item, colnames=self.colnames, vals=value) elif ( isinstance(item, slice) or isinstance(item, np.ndarray) or isinstance(item, list) or ( isinstance(item, tuple) # output from np.where and all(isinstance(x, np.ndarray) for x in item) ) ): if isinstance(value, Table): vals = (col for col in value.columns.values()) elif isinstance(value, np.ndarray) and value.dtype.names: vals = (value[name] for name in value.dtype.names) elif np.isscalar(value): vals = itertools.repeat(value, n_cols) else: # Assume this is an iterable that will work if len(value) != n_cols: raise ValueError( "Right side value needs {} elements (one for each column)".format( n_cols ) ) vals = value for col, val in zip(self.columns.values(), vals): col[item] = val else: raise ValueError(f"Illegal type {type(item)} for table item access") def __delitem__(self, item): if isinstance(item, str): self.remove_column(item) elif isinstance(item, (int, np.integer)): self.remove_row(item) elif isinstance(item, (list, tuple, np.ndarray)) and all( isinstance(x, str) for x in item ): self.remove_columns(item) elif ( isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i" ): self.remove_rows(item) elif isinstance(item, slice): self.remove_rows(item) else: raise IndexError("illegal key or index value") def _ipython_key_completions_(self): return self.colnames def field(self, item): """Return column[item] for recarray compatibility.""" return self.columns[item] @property def masked(self): return self._masked @masked.setter def masked(self, masked): raise Exception( "Masked attribute is read-only (use t = Table(t, masked=True)" " to convert to a masked table)" ) def _set_masked(self, masked): """ Set the table masked property. Parameters ---------- masked : bool State of table masking (`True` or `False`) """ if masked in [True, False, None]: self._masked = masked else: raise ValueError("masked should be one of True, False, None") self._column_class = self.MaskedColumn if self._masked else self.Column @property def ColumnClass(self): if self._column_class is None: return self.Column else: return self._column_class @property def dtype(self): return np.dtype([descr(col) for col in self.columns.values()]) @property def colnames(self): return list(self.columns.keys()) @staticmethod def _is_list_or_tuple_of_str(names): """Check that ``names`` is a tuple or list of strings""" return ( isinstance(names, (tuple, list)) and names and all(isinstance(x, str) for x in names) ) def keys(self): return list(self.columns.keys()) def values(self): return self.columns.values() def items(self): return self.columns.items() def __len__(self): # For performance reasons (esp. in Row) cache the first column name # and use that subsequently for the table length. If might not be # available yet or the column might be gone now, in which case # try again in the except block. try: return len(OrderedDict.__getitem__(self.columns, self._first_colname)) except (AttributeError, KeyError): if len(self.columns) == 0: return 0 # Get the first column name self._first_colname = next(iter(self.columns)) return len(self.columns[self._first_colname]) def index_column(self, name): """ Return the positional index of column ``name``. 
Parameters ---------- name : str column name Returns ------- index : int Positional index of column ``name``. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Get index of column 'b' of the table:: >>> t.index_column('b') 1 """ try: return self.colnames.index(name) except ValueError: raise ValueError(f"Column {name} does not exist") def add_column( self, col, index=None, name=None, rename_duplicate=False, copy=True, default_name=None, ): """ Add a new column to the table using ``col`` as input. If ``index`` is supplied then insert column before ``index`` position in the list of columns, otherwise append column to the end of the list. The ``col`` input can be any data object which is acceptable as a `~astropy.table.Table` column object or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. To add several columns at once use ``add_columns()`` or simply call ``add_column()`` for each one. There is very little performance difference in the two approaches. Parameters ---------- col : object Data object for the new column index : int or None Insert column before this position or at end (default). name : str Column name rename_duplicate : bool Uniquify column name if it already exist. Default is False. copy : bool Make a copy of the new column. Default is True. default_name : str or None Name to use if both ``name`` and ``col.info.name`` are not available. Defaults to ``col{number_of_columns}``. Examples -------- Create a table with two columns 'a' and 'b', then create a third column 'c' and append it to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> t.add_column(col_c) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y Add column 'd' at position 1. Note that the column is inserted before the given index:: >>> t.add_column(['a', 'b'], name='d', index=1) >>> print(t) a d b c --- --- --- --- 1 a 0.1 x 2 b 0.2 y Add second column named 'b' with rename_duplicate:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(1.1, name='b', rename_duplicate=True) >>> print(t) a b b_1 --- --- --- 1 0.1 1.1 2 0.2 1.1 Add an unnamed column or mixin object in the table using a default name or by specifying an explicit name with ``name``. Name can also be overridden:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(['a', 'b']) >>> t.add_column(col_c, name='d') >>> print(t) a b col2 d --- --- ---- --- 1 0.1 a x 2 0.2 b y """ if default_name is None: default_name = f"col{len(self.columns)}" # Convert col data to acceptable object for insertion into self.columns. # Note that along with the lines above and below, this allows broadcasting # of scalars to the correct shape for adding to table. col = self._convert_data_to_col( col, name=name, copy=copy, default_name=default_name ) # Assigning a scalar column to an empty table should result in an # exception (see #3811). if col.shape == () and len(self) == 0: raise TypeError("Empty table cannot have column set to scalar value") # Make col data shape correct for scalars. The second test is to allow # broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]]. 
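# Illustrative examples (comments only): on a length-2 table,
#   t.add_column(1.1, name='x')       -> column [1.1, 1.1]
#   t['new'] = [[1, 2]]               -> every row gets the 1-d element [1, 2]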
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0: new_shape = (len(self),) + getattr(col, "shape", ())[1:] if isinstance(col, np.ndarray): col = np.broadcast_to(col, shape=new_shape, subok=True) elif isinstance(col, ShapedLikeNDArray): col = col._apply(np.broadcast_to, shape=new_shape, subok=True) # broadcast_to() results in a read-only array. Apparently it only changes # the view to look like the broadcasted array. So copy. col = col_copy(col) name = col.info.name # Ensure that new column is the right length if len(self.columns) > 0 and len(col) != len(self): raise ValueError("Inconsistent data column lengths") if rename_duplicate: orig_name = name i = 1 while name in self.columns: # Iterate until a unique name is found name = orig_name + "_" + str(i) i += 1 col.info.name = name # Set col parent_table weakref and ensure col has mask attribute if table.masked self._set_col_parent_table_and_mask(col) # Add new column as last column self.columns[name] = col if index is not None: # Move the other cols to the right of the new one move_names = self.colnames[index:-1] for move_name in move_names: self.columns.move_to_end(move_name, last=True) def add_columns( self, cols, indexes=None, names=None, copy=True, rename_duplicate=False ): """ Add a list of new columns the table using ``cols`` data objects. If a corresponding list of ``indexes`` is supplied then insert column before each ``index`` position in the *original* list of columns, otherwise append columns to the end of the list. The ``cols`` input can include any data objects which are acceptable as `~astropy.table.Table` column objects or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. From a performance perspective there is little difference between calling this method once or looping over the new columns and calling ``add_column()`` for each column. Parameters ---------- cols : list of object List of data objects for the new columns indexes : list of int or None Insert column before this position or at end (default). names : list of str Column names copy : bool Make a copy of the new columns. Default is True. rename_duplicate : bool Uniquify new column names if they duplicate the existing ones. Default is False. See Also -------- astropy.table.hstack, update, replace_column Examples -------- Create a table with two columns 'a' and 'b', then create columns 'c' and 'd' and append them to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> col_d = Column(name='d', data=['u', 'v']) >>> t.add_columns([col_c, col_d]) >>> print(t) a b c d --- --- --- --- 1 0.1 x u 2 0.2 y v Add column 'c' at position 0 and column 'd' at position 1. Note that the columns are inserted before the given position:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'], ... indexes=[0, 1]) >>> print(t) c a d b --- --- --- --- x 1 u 0.1 y 2 v 0.2 Add second column 'b' and column 'c' with ``rename_duplicate``:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'), ... rename_duplicate=True) >>> print(t) a b b_1 c --- --- --- --- 1 0.1 1.1 x 2 0.2 1.2 y Add unnamed columns or mixin objects in the table using default names or by specifying explicit names with ``names``. 
Names can also be overridden:: >>> t = Table() >>> col_b = Column(name='b', data=['u', 'v']) >>> t.add_columns([[1, 2], col_b]) >>> t.add_columns([[3, 4], col_b], names=['c', 'd']) >>> print(t) col0 b c d ---- --- --- --- 1 u 3 u 2 v 4 v """ if indexes is None: indexes = [len(self.columns)] * len(cols) elif len(indexes) != len(cols): raise ValueError("Number of indexes must match number of cols") if names is None: names = (None,) * len(cols) elif len(names) != len(cols): raise ValueError("Number of names must match number of cols") default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))] for ii in reversed(np.argsort(indexes, kind="stable")): self.add_column( cols[ii], index=indexes[ii], name=names[ii], default_name=default_names[ii], rename_duplicate=rename_duplicate, copy=copy, ) def _replace_column_warnings(self, name, col): """ Same as replace_column but issues warnings under various circumstances. """ warns = conf.replace_warnings refcount = None old_col = None if "refcount" in warns and name in self.colnames: refcount = sys.getrefcount(self[name]) if name in self.colnames: old_col = self[name] # This may raise an exception (e.g. t['a'] = 1) in which case none of # the downstream code runs. self.replace_column(name, col) if "always" in warns: warnings.warn( f"replaced column '{name}'", TableReplaceWarning, stacklevel=3 ) if "slice" in warns: try: # Check for ndarray-subclass slice. An unsliced instance # has an ndarray for the base while sliced has the same class # as parent. if isinstance(old_col.base, old_col.__class__): msg = ( "replaced column '{}' which looks like an array slice. " "The new column no longer shares memory with the " "original array.".format(name) ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) except AttributeError: pass if "refcount" in warns: # Did reference count change? new_refcount = sys.getrefcount(self[name]) if refcount != new_refcount: msg = ( "replaced column '{}' and the number of references " "to the column changed.".format(name) ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) if "attributes" in warns: # Any of the standard column attributes changed? changed_attrs = [] new_col = self[name] # Check base DataInfo attributes that any column will have for attr in DataInfo.attr_names: if getattr(old_col.info, attr) != getattr(new_col.info, attr): changed_attrs.append(attr) if changed_attrs: msg = "replaced column '{}' and column attributes {} changed.".format( name, changed_attrs ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) def replace_column(self, name, col, copy=True): """ Replace column ``name`` with the new ``col`` object. The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- name : str Name of column to replace col : `~astropy.table.Column` or `~numpy.ndarray` or sequence New column object to replace the existing column. 
copy : bool Make copy of the input ``col``, default=True See Also -------- add_columns, astropy.table.hstack, update Examples -------- Replace column 'a' with a float version of itself:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) >>> float_a = t['a'].astype(float) >>> t.replace_column('a', float_a) """ if name not in self.colnames: raise ValueError(f"column name {name} is not in the table") if self[name].info.indices: raise ValueError("cannot replace a table index column") col = self._convert_data_to_col(col, name=name, copy=copy) self._set_col_parent_table_and_mask(col) # Ensure that new column is the right length, unless it is the only column # in which case re-sizing is allowed. if len(self.columns) > 1 and len(col) != len(self[name]): raise ValueError("length of new column must match table length") self.columns.__setitem__(name, col, validated=True) def remove_row(self, index): """ Remove a row from the table. Parameters ---------- index : int Index of row to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove row 1 from the table:: >>> t.remove_row(1) >>> print(t) a b c --- --- --- 1 0.1 x 3 0.3 z To remove several rows at the same time use remove_rows. """ # check the index against the types that work with np.delete if not isinstance(index, (int, np.integer)): raise TypeError("Row index must be an integer") self.remove_rows(index) def remove_rows(self, row_specifier): """ Remove rows from the table. Parameters ---------- row_specifier : slice or int or array of int Specification for rows to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove rows 0 and 2 from the table:: >>> t.remove_rows([0, 2]) >>> print(t) a b c --- --- --- 2 0.2 y Note that there are no warnings if the slice operator extends outside the data:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_rows(slice(10, 20, 1)) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z """ # Update indices for index in self.indices: index.remove_rows(row_specifier) keep_mask = np.ones(len(self), dtype=bool) keep_mask[row_specifier] = False columns = self.TableColumns() for name, col in self.columns.items(): newcol = col[keep_mask] newcol.info.parent_table = self columns[name] = newcol self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, "_groups"): del self._groups def iterrows(self, *names): """ Iterate over rows of table returning a tuple of values for each row. This method is especially useful when only a subset of columns are needed. The ``iterrows`` method can be substantially faster than using the standard Table row iteration (e.g. ``for row in tbl:``), since that returns a new ``~astropy.table.Row`` object for each row and accessing a column in that row (e.g. ``row['col0']``) is slower than tuple access. Parameters ---------- names : list List of column names (default to all columns if no names provided) Returns ------- rows : iterable Iterator returns tuples of row values Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table({'a': [1, 2, 3], ... 'b': [1.0, 2.5, 3.0], ... 
'c': ['x', 'y', 'z']}) To iterate row-wise using column names:: >>> for a, c in t.iterrows('a', 'c'): ... print(a, c) 1 x 2 y 3 z """ if len(names) == 0: names = self.colnames else: for name in names: if name not in self.colnames: raise ValueError(f"{name} is not a valid column name") cols = (self[name] for name in names) out = zip(*cols) return out def _set_of_names_in_colnames(self, names): """Return ``names`` as a set if valid, or raise a `KeyError`. ``names`` is valid if all elements in it are in ``self.colnames``. If ``names`` is a string then it is interpreted as a single column name. """ names = {names} if isinstance(names, str) else set(names) invalid_names = names.difference(self.colnames) if len(invalid_names) == 1: raise KeyError(f'column "{invalid_names.pop()}" does not exist') elif len(invalid_names) > 1: raise KeyError(f"columns {invalid_names} do not exist") return names def remove_column(self, name): """ Remove a column from the table. This can also be done with:: del table[name] Parameters ---------- name : str Name of column to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove column 'b' from the table:: >>> t.remove_column('b') >>> print(t) a c --- --- 1 x 2 y 3 z To remove several columns at the same time use remove_columns. """ self.remove_columns([name]) def remove_columns(self, names): """ Remove several columns from the table. Parameters ---------- names : str or iterable of str Names of the columns to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove columns 'b' and 'c' from the table:: >>> t.remove_columns(['b', 'c']) >>> print(t) a --- 1 2 3 Specifying only a single column also works. Remove column 'b' from the table:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_columns('b') >>> print(t) a c --- --- 1 x 2 y 3 z This gives the same as using remove_column. """ for name in self._set_of_names_in_colnames(names): del self.columns[name] def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func): """ Convert string-like columns to/from bytestring and unicode (internal only). Parameters ---------- in_kind : str Input dtype.kind out_kind : str Output dtype.kind """ for col in self.itercols(): if col.dtype.kind == in_kind: try: # This requires ASCII and is faster by a factor of up to ~8, so # try that first. newcol = col.__class__(col, dtype=out_kind) except (UnicodeEncodeError, UnicodeDecodeError): newcol = col.__class__(encode_decode_func(col, "utf-8")) # Quasi-manually copy info attributes. Unfortunately # DataInfo.__set__ does not do the right thing in this case # so newcol.info = col.info does not get the old info attributes. for attr in ( col.info.attr_names - col.info._attrs_no_copy - {"dtype"} ): value = deepcopy(getattr(col.info, attr)) setattr(newcol.info, attr, value) self[col.name] = newcol def convert_bytestring_to_unicode(self): """ Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') using UTF-8 encoding. Internally this changes string columns to represent each character in the string with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows scripts to manipulate string arrays with natural syntax. 
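Examples
--------
A short illustrative sketch (the table and the column name ``s`` here are arbitrary)::

    >>> t = Table({'s': [b'a', b'bc']})
    >>> t['s'].dtype.kind
    'S'
    >>> t.convert_bytestring_to_unicode()
    >>> t['s'].dtype.kind
    'U'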
""" self._convert_string_dtype("S", "U", np.char.decode) def convert_unicode_to_bytestring(self): """ Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S') using UTF-8 encoding. When exporting a unicode string array to a file, it may be desirable to encode unicode columns as bytestrings. """ self._convert_string_dtype("U", "S", np.char.encode) def keep_columns(self, names): """ Keep only the columns specified (remove the others). Parameters ---------- names : str or iterable of str The columns to keep. All other columns will be removed. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Keep only column 'a' of the table:: >>> t.keep_columns('a') >>> print(t) a --- 1 2 3 Keep columns 'a' and 'c' of the table:: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.keep_columns(['a', 'c']) >>> print(t) a c --- --- 1 x 2 y 3 z """ names = self._set_of_names_in_colnames(names) for colname in self.colnames: if colname not in names: del self.columns[colname] def rename_column(self, name, new_name): """ Rename a column. This can also be done directly with by setting the ``name`` attribute for a column:: table[name].name = new_name TODO: this won't work for mixins Parameters ---------- name : str The current name of the column. new_name : str The new name for the column Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming column 'a' to 'aa':: >>> t.rename_column('a' , 'aa') >>> print(t) aa b c --- --- --- 1 3 5 2 4 6 """ if name not in self.keys(): raise KeyError(f"Column {name} does not exist") self.columns[name].info.name = new_name def rename_columns(self, names, new_names): """ Rename multiple columns. Parameters ---------- names : list, tuple A list or tuple of existing column names. new_names : list, tuple A list or tuple of new column names. Examples -------- Create a table with three columns 'a', 'b', 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming columns 'a' to 'aa' and 'b' to 'bb':: >>> names = ('a','b') >>> new_names = ('aa','bb') >>> t.rename_columns(names, new_names) >>> print(t) aa bb c --- --- --- 1 3 5 2 4 6 """ if not self._is_list_or_tuple_of_str(names): raise TypeError("input 'names' must be a tuple or a list of column names") if not self._is_list_or_tuple_of_str(new_names): raise TypeError( "input 'new_names' must be a tuple or a list of column names" ) if len(names) != len(new_names): raise ValueError( "input 'names' and 'new_names' list arguments must be the same length" ) for name, new_name in zip(names, new_names): self.rename_column(name, new_name) def _set_row(self, idx, colnames, vals): try: assert len(vals) == len(colnames) except Exception: raise ValueError( "right hand side must be a sequence of values with " "the same length as the number of selected columns" ) # Keep track of original values before setting each column so that # setting row can be transactional. 
orig_vals = [] cols = self.columns try: for name, val in zip(colnames, vals): orig_vals.append(cols[name][idx]) cols[name][idx] = val except Exception: # If anything went wrong first revert the row update then raise for name, val in zip(colnames, orig_vals[:-1]): cols[name][idx] = val raise def add_row(self, vals=None, mask=None): """Add a new row to the end of the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. This method requires that the Table object "owns" the underlying array data. In particular one cannot add a row to a Table that was initialized with copy=False from an existing array. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c':: >>> t.add_row([3,6,9]) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 3 6 9 """ self.insert_row(len(self), vals, mask) def insert_row(self, index, vals=None, mask=None): """Add a new row before the given ``index`` position in the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row """ colnames = self.colnames N = len(self) if index < -N or index > N: raise IndexError( f"Index {index} is out of bounds for table with length {N}" ) if index < 0: index += N if isinstance(vals, Mapping) or vals is None: # From the vals and/or mask mappings create the corresponding lists # that have entries for each table column. if mask is not None and not isinstance(mask, Mapping): raise TypeError("Mismatch between type of vals and mask") # Now check that the mask is specified for the same keys as the # values, otherwise things get really confusing. 
if mask is not None and set(vals.keys()) != set(mask.keys()): raise ValueError("keys in mask should match keys in vals") if vals and any(name not in colnames for name in vals): raise ValueError("Keys in vals must all be valid column names") vals_list = [] mask_list = [] for name in colnames: if vals and name in vals: vals_list.append(vals[name]) mask_list.append(False if mask is None else mask[name]) else: col = self[name] if hasattr(col, "dtype"): # Make a placeholder zero element of the right type which is masked. # This assumes the appropriate insert() method will broadcast a # numpy scalar to the right shape. vals_list.append(np.zeros(shape=(), dtype=col.dtype)) # For masked table any unsupplied values are masked by default. mask_list.append(self.masked and vals is not None) else: raise ValueError(f"Value must be supplied for column '{name}'") vals = vals_list mask = mask_list if isiterable(vals): if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)): raise TypeError("Mismatch between type of vals and mask") if len(self.columns) != len(vals): raise ValueError("Mismatch between number of vals and columns") if mask is not None: if len(self.columns) != len(mask): raise ValueError("Mismatch between number of masks and columns") else: mask = [False] * len(self.columns) else: raise TypeError("Vals must be an iterable or mapping or None") # Insert val at index for each column columns = self.TableColumns() for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask): try: # If new val is masked and the existing column does not support masking # then upgrade the column to a mask-enabled type: either the table-level # default ColumnClass or else MaskedColumn. if ( mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn) ): col_cls = ( self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn ) col = col_cls(col, copy=False) newcol = col.insert(index, val, axis=0) if len(newcol) != N + 1: raise ValueError( "Incorrect length for column {} after inserting {}" " (expected {}, got {})".format(name, val, len(newcol), N + 1) ) newcol.info.parent_table = self # Set mask if needed and possible if mask_: if hasattr(newcol, "mask"): newcol[index] = np.ma.masked else: raise TypeError( "mask was supplied for column '{}' but it does not " "support masked values".format(col.info.name) ) columns[name] = newcol except Exception as err: raise ValueError( "Unable to insert row because of exception in column '{}':\n{}".format( name, err ) ) from err for table_index in self.indices: table_index.insert_row(index, vals, self.columns.values()) self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, "_groups"): del self._groups def _replace_cols(self, columns): for col, new_col in zip(self.columns.values(), columns.values()): new_col.info.indices = [] for index in col.info.indices: index.columns[index.col_position(col.info.name)] = new_col new_col.info.indices.append(index) self.columns = columns def update(self, other, copy=True): """ Perform a dictionary-style update and merge metadata. The argument ``other`` must be a |Table|, or something that can be used to initialize a table. Columns from (possibly converted) ``other`` are added to this table. In case of matching column names the column from this table is replaced with the one from ``other``. Parameters ---------- other : table-like Data to update this table with. 
copy : bool Whether the updated columns should be copies of or references to the originals. See Also -------- add_columns, astropy.table.hstack, replace_column Examples -------- Update a table with another table:: >>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0}) >>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2}) >>> t1.update(t2) >>> t1 <Table length=2> a b c str3 float64 float64 ---- ------- ------- foo 1.0 7.0 bar 2.0 11.0 >>> t1.meta {'i': 0, 'n': 2} Update a table with a dictionary:: >>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}) >>> t.update({'b': [1., 2.]}) >>> t <Table length=2> a b str3 float64 ---- ------- foo 1.0 bar 2.0 """ from .operations import _merge_table_meta if not isinstance(other, Table): other = self.__class__(other, copy=copy) common_cols = set(self.colnames).intersection(other.colnames) for name, col in other.items(): if name in common_cols: self.replace_column(name, col, copy=copy) else: self.add_column(col, name=name, copy=copy) _merge_table_meta(self, [self, other], metadata_conflicts="silent") def argsort(self, keys=None, kind=None, reverse=False): """ Return the indices which would sort the table according to one or more key columns. This simply calls the `numpy.argsort` function on the table with the ``order`` parameter set to ``keys``. Parameters ---------- keys : str or list of str The column name(s) to order the table by kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. reverse : bool Sort in reverse order (default=False) Returns ------- index_array : ndarray, int Array of indices that sorts the table by the specified key column(s). """ if isinstance(keys, str): keys = [keys] # use index sorted order if possible if keys is not None: index = get_index(self, names=keys) if index is not None: idx = np.asarray(index.sorted_data()) return idx[::-1] if reverse else idx kwargs = {} if keys: # For multiple keys return a structured array which gets sorted, # while for a single key return a single ndarray. Sorting a # one-column structured array is slower than ndarray (e.g. a # factor of ~6 for a 10 million long random array), and much slower # for in principle sortable columns like Time, which get stored as # object arrays. if len(keys) > 1: kwargs["order"] = keys data = self.as_array(names=keys) else: data = self[keys[0]] else: # No keys provided so sort on all columns. data = self.as_array() if kind: kwargs["kind"] = kind # np.argsort will look for a possible .argsort method (e.g., for Time), # and if that fails cast to an array and try sorting that way. idx = np.argsort(data, **kwargs) return idx[::-1] if reverse else idx def sort(self, keys=None, *, kind=None, reverse=False): """ Sort the table according to one or more keys. This operates on the existing table and does not return a new table. Parameters ---------- keys : str or list of str The key(s) to order the table by. If None, use the primary index of the Table. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. reverse : bool Sort in reverse order (default=False) Examples -------- Create a table with 3 columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'], ... 
[12, 15, 18]], names=('firstname', 'name', 'tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Sorting according to standard sorting rules, first 'name' then 'firstname':: >>> t.sort(['name', 'firstname']) >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 Sorting according to standard sorting rules, first 'firstname' then 'tel', in reverse order:: >>> t.sort(['firstname', 'tel'], reverse=True) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 John Jackson 18 Jo Miller 15 """ if keys is None: if not self.indices: raise ValueError("Table sort requires input keys or a table index") keys = [x.info.name for x in self.indices[0].columns] if isinstance(keys, str): keys = [keys] indexes = self.argsort(keys, kind=kind, reverse=reverse) with self.index_mode("freeze"): for name, col in self.columns.items(): # Make a new sorted column. This requires that take() also copies # relevant info attributes for mixin columns. new_col = col.take(indexes, axis=0) # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9553 and #9536 for discussion. try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col def reverse(self): """ Reverse the row order of table rows. The table is reversed in place and there are no function arguments. Examples -------- Create a table with three columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], ... [12,15,18]], names=('firstname','name','tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Reversing order:: >>> t.reverse() >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 """ for col in self.columns.values(): # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9836, #9553, and #9536 for discussion. new_col = col[::-1] try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col for index in self.indices: index.reverse() def round(self, decimals=0): """ Round numeric columns in-place to the specified number of decimals. Non-numeric columns will be ignored. Examples -------- Create three columns with different types: >>> t = Table([[1, 4, 5], [-25.55, 12.123, 85], ... ['a', 'b', 'c']], names=('a', 'b', 'c')) >>> print(t) a b c --- ------ --- 1 -25.55 a 4 12.123 b 5 85.0 c Round them all to 0: >>> t.round(0) >>> print(t) a b c --- ----- --- 1 -26.0 a 4 12.0 b 5 85.0 c Round column 'a' to -1 decimal: >>> t.round({'a':-1}) >>> print(t) a b c --- ----- --- 0 -26.0 a 0 12.0 b 0 85.0 c Parameters ---------- decimals: int, dict Number of decimals to round the columns to. If a dict is given, the columns will be rounded to the number specified as the value. If a certain column is not in the dict given, it will remain the same. 
""" if isinstance(decimals, Mapping): decimal_values = decimals.values() column_names = decimals.keys() elif isinstance(decimals, int): decimal_values = itertools.repeat(decimals) column_names = self.colnames else: raise ValueError("'decimals' argument must be an int or a dict") for colname, decimal in zip(column_names, decimal_values): col = self.columns[colname] if np.issubdtype(col.info.dtype, np.number): try: np.around(col, decimals=decimal, out=col) except TypeError: # Bug in numpy see https://github.com/numpy/numpy/issues/15438 col[()] = np.around(col, decimals=decimal) def copy(self, copy_data=True): """ Return a copy of the table. Parameters ---------- copy_data : bool If `True` (the default), copy the underlying data array. Otherwise, use the same data array. The ``meta`` is always deepcopied regardless of the value for ``copy_data``. """ out = self.__class__(self, copy=copy_data) # If the current table is grouped then do the same in the copy if hasattr(self, "_groups"): out._groups = groups.TableGroups( out, indices=self._groups._indices, keys=self._groups._keys ) return out def __deepcopy__(self, memo=None): return self.copy(True) def __copy__(self): return self.copy(False) def __lt__(self, other): return super().__lt__(other) def __gt__(self, other): return super().__gt__(other) def __le__(self, other): return super().__le__(other) def __ge__(self, other): return super().__ge__(other) def __eq__(self, other): return self._rows_equal(other) def __ne__(self, other): return ~self.__eq__(other) def _rows_equal(self, other): """ Row-wise comparison of table with any other object. This is actual implementation for __eq__. Returns a 1-D boolean numpy array showing result of row-wise comparison. This is the same as the ``==`` comparison for tables. Parameters ---------- other : Table or DataFrame or ndarray An object to compare with table Examples -------- Comparing one Table with other:: >>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> t1._rows_equal(t2) array([ True, True]) """ if isinstance(other, Table): other = other.as_array() if self.has_masked_columns: if isinstance(other, np.ma.MaskedArray): result = self.as_array() == other else: # If mask is True, then by definition the row doesn't match # because the other array is not masked. false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names]) result = (self.as_array().data == other) & (self.mask == false_mask) else: if isinstance(other, np.ma.MaskedArray): # If mask is True, then by definition the row doesn't match # because the other array is not masked. false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names]) result = (self.as_array() == other.data) & (other.mask == false_mask) else: result = self.as_array() == other return result def values_equal(self, other): """ Element-wise comparison of table with another table, list, or scalar. Returns a ``Table`` with the same columns containing boolean values showing result of comparison. 
Parameters ---------- other : table-like object or list or scalar Object to compare with table Examples -------- Compare one Table with other:: >>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c')) >>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c')) >>> t1.values_equal(t2) <Table length=2> a b c bool bool bool ---- ----- ----- True False False True True True """ if isinstance(other, Table): names = other.colnames else: try: other = Table(other, copy=False) names = other.colnames except Exception: # Broadcast other into a dict, so e.g. other = 2 will turn into # other = {'a': 2, 'b': 2} and then equality does a # column-by-column broadcasting. names = self.colnames other = {name: other for name in names} # Require column names match but do not require same column order if set(self.colnames) != set(names): raise ValueError("cannot compare tables with different column names") eqs = [] for name in names: try: np.broadcast(self[name], other[name]) # Check if broadcast-able # Catch the numpy FutureWarning related to equality checking, # "elementwise comparison failed; returning scalar instead, but # in the future will perform elementwise comparison". Turn this # into an exception since the scalar answer is not what we want. with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") eq = self[name] == other[name] if ( warns and issubclass(warns[-1].category, FutureWarning) and "elementwise comparison failed" in str(warns[-1].message) ): raise FutureWarning(warns[-1].message) except Exception as err: raise ValueError(f"unable to compare column {name}") from err # Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just # broken and completely ignores that it should return an array. if not ( isinstance(eq, np.ndarray) and eq.dtype is np.dtype("bool") and len(eq) == len(self) ): raise TypeError( f"comparison for column {name} returned {eq} " "instead of the expected boolean ndarray" ) eqs.append(eq) out = Table(eqs, names=names) return out @property def groups(self): if not hasattr(self, "_groups"): self._groups = groups.TableGroups(self) return self._groups def group_by(self, keys): """ Group this table by the specified ``keys`` This effectively splits the table into groups which correspond to unique values of the ``keys`` grouping object. The output is a new `~astropy.table.TableGroups` which contains a copy of this table but sorted by row according to ``keys``. The ``keys`` input to `group_by` can be specified in different ways: - String or list of strings corresponding to table column name(s) - Numpy array (homogeneous or structured) with same length as this table - `~astropy.table.Table` with same length as this table Parameters ---------- keys : str, list of str, numpy array, or `~astropy.table.Table` Key grouping object Returns ------- out : `~astropy.table.Table` New table with groups set """ return groups.table_group_by(self, keys) def to_pandas(self, index=None, use_nullable_int=True): """ Return a :class:`pandas.DataFrame` instance The index of the created DataFrame is controlled by the ``index`` argument. For ``index=True`` or the default ``None``, an index will be specified for the DataFrame if there is a primary key index on the Table *and* if it corresponds to a single column. If ``index=False`` then no DataFrame index will be specified. If ``index`` is the name of a column in the table then that will be the DataFrame index. 
In addition to vanilla columns or masked columns, this supports Table mixin columns like Quantity, Time, or SkyCoord. In many cases these objects have no analog in pandas and will be converted to a "encoded" representation using only Column or MaskedColumn. The exception is Time or TimeDelta columns, which will be converted to the corresponding representation in pandas using ``np.datetime64`` or ``np.timedelta64``. See the example below. Parameters ---------- index : None, bool, str Specify DataFrame index mode use_nullable_int : bool, default=True Convert integer MaskedColumn to pandas nullable integer type. If ``use_nullable_int=False`` or the pandas version does not support nullable integer types (version < 0.24), then the column is converted to float with NaN for missing elements and a warning is issued. Returns ------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance Raises ------ ImportError If pandas is not installed ValueError If the Table has multi-dimensional columns Examples -------- Here we convert a table with a few mixins to a :class:`pandas.DataFrame` instance. >>> import pandas as pd >>> from astropy.table import QTable >>> import astropy.units as u >>> from astropy.time import Time, TimeDelta >>> from astropy.coordinates import SkyCoord >>> q = [1, 2] * u.m >>> tm = Time([1998, 2002], format='jyear') >>> sc = SkyCoord([5, 6], [7, 8], unit='deg') >>> dt = TimeDelta([3, 200] * u.s) >>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt']) >>> df = t.to_pandas(index='tm') >>> with pd.option_context('display.max_columns', 20): ... print(df) q sc.ra sc.dec dt tm 1998-01-01 1.0 5.0 7.0 0 days 00:00:03 2002-01-01 2.0 6.0 8.0 0 days 00:03:20 """ from pandas import DataFrame, Series if index is not False: if index in (None, True): # Default is to use the table primary key if available and a single column if self.primary_key and len(self.primary_key) == 1: index = self.primary_key[0] else: index = False else: if index not in self.colnames: raise ValueError( "index must be None, False, True or a table column name" ) def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. """ from astropy.time import TimeBase, TimeDelta from . import serialize # Convert any Time or TimeDelta columns and pay attention to masking time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)] if time_cols: # Make a light copy of table and clear any indices new_cols = [] for col in tbl.itercols(): new_col = ( col_copy(col, copy_indices=False) if col.info.indices else col ) new_cols.append(new_col) tbl = tbl.__class__(new_cols, copy=False) # Certain subclasses (e.g. TimeSeries) may generate new indices on # table creation, so make sure there are no indices on the table. for col in tbl.itercols(): col.info.indices.clear() for col in time_cols: if isinstance(col, TimeDelta): # Convert to nanoseconds (matches astropy datetime64 support) new_col = (col.sec * 1e9).astype("timedelta64[ns]") nat = np.timedelta64("NaT") else: new_col = col.datetime64.copy() nat = np.datetime64("NaT") if col.masked: new_col[col.mask] = nat tbl[col.info.name] = new_col # Convert the table to one with no mixins, only Column objects. 
encode_tbl = serialize.represent_mixins_as_columns(tbl) return encode_tbl tbl = _encode_mixins(self) badcols = [name for name, col in self.columns.items() if len(col.shape) > 1] if badcols: # fmt: off raise ValueError( f'Cannot convert a table with multidimensional columns to a ' f'pandas DataFrame. Offending columns are: {badcols}\n' f'One can filter out such columns using:\n' f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n' f'tbl[names].to_pandas(...)' ) # fmt: on out = OrderedDict() for name, column in tbl.columns.items(): if getattr(column.dtype, "isnative", True): out[name] = column else: out[name] = column.data.byteswap().newbyteorder("=") if isinstance(column, MaskedColumn) and np.any(column.mask): if column.dtype.kind in ["i", "u"]: pd_dtype = column.dtype.name if use_nullable_int: # Convert int64 to Int64, uint32 to UInt32, etc for nullable types pd_dtype = pd_dtype.replace("i", "I").replace("u", "U") out[name] = Series(out[name], dtype=pd_dtype) # If pandas is older than 0.24 the type may have turned to float if column.dtype.kind != out[name].dtype.kind: warnings.warn( f"converted column '{name}' from {column.dtype} to" f" {out[name].dtype}", TableReplaceWarning, stacklevel=3, ) elif column.dtype.kind not in ["f", "c"]: out[name] = column.astype(object).filled(np.nan) kwargs = {} if index: idx = out.pop(index) kwargs["index"] = idx # We add the table index to Series inputs (MaskedColumn with int values) to override # its default RangeIndex, see #11432 for v in out.values(): if isinstance(v, Series): v.index = idx df = DataFrame(out, **kwargs) if index: # Explicitly set the pandas DataFrame index to the original table # index name. df.index.name = idx.info.name return df @classmethod def from_pandas(cls, dataframe, index=False, units=None): """ Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance In addition to converting generic numeric or string columns, this supports conversion of pandas Date and Time delta columns to `~astropy.time.Time` and `~astropy.time.TimeDelta` columns, respectively. Parameters ---------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance index : bool Include the index column in the returned table (default=False) units: dict A dict mapping column names to to a `~astropy.units.Unit`. The columns will have the specified unit in the Table. Returns ------- table : `~astropy.table.Table` A `~astropy.table.Table` (or subclass) instance Raises ------ ImportError If pandas is not installed Examples -------- Here we convert a :class:`pandas.DataFrame` instance to a `~astropy.table.QTable`. >>> import numpy as np >>> import pandas as pd >>> from astropy.table import QTable >>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]') >>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]')) >>> df = pd.DataFrame({'time': time}) >>> df['dt'] = dt >>> df['x'] = [3., 4.] >>> with pd.option_context('display.max_columns', 20): ... 
print(df) time dt x 0 1998-01-01 0 days 00:00:01 3.0 1 2002-01-01 0 days 00:05:00 4.0 >>> QTable.from_pandas(df) <QTable length=2> time dt x Time TimeDelta float64 ----------------------- --------- ------- 1998-01-01T00:00:00.000 1.0 3.0 2002-01-01T00:00:00.000 300.0 4.0 """ out = OrderedDict() names = list(dataframe.columns) columns = [dataframe[name] for name in names] datas = [np.array(column) for column in columns] masks = [np.array(column.isnull()) for column in columns] if index: index_name = dataframe.index.name or "index" while index_name in names: index_name = "_" + index_name + "_" names.insert(0, index_name) columns.insert(0, dataframe.index) datas.insert(0, np.array(dataframe.index)) masks.insert(0, np.zeros(len(dataframe), dtype=bool)) if units is None: units = [None] * len(names) else: if not isinstance(units, Mapping): raise TypeError('Expected a Mapping "column-name" -> "unit"') not_found = set(units.keys()) - set(names) if not_found: warnings.warn(f"`units` contains additional columns: {not_found}") units = [units.get(name) for name in names] for name, column, data, mask, unit in zip(names, columns, datas, masks, units): if column.dtype.kind in ["u", "i"] and np.any(mask): # Special-case support for pandas nullable int np_dtype = str(column.dtype).lower() data = np.zeros(shape=column.shape, dtype=np_dtype) data[~mask] = column[~mask] out[name] = MaskedColumn( data=data, name=name, mask=mask, unit=unit, copy=False ) continue if data.dtype.kind == "O": # If all elements of an object array are string-like or np.nan # then coerce back to a native numpy str/unicode array. string_types = (str, bytes) nan = np.nan if all(isinstance(x, string_types) or x is nan for x in data): # Force any missing (null) values to b''. Numpy will # upcast to str/unicode as needed. data[mask] = b"" # When the numpy object array is represented as a list then # numpy initializes to the correct string or unicode type. data = np.array([x for x in data]) # Numpy datetime64 if data.dtype.kind == "M": from astropy.time import Time out[name] = Time(data, format="datetime64") if np.any(mask): out[name][mask] = np.ma.masked out[name].format = "isot" # Numpy timedelta64 elif data.dtype.kind == "m": from astropy.time import TimeDelta data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9 out[name] = TimeDelta(data_sec, format="sec") if np.any(mask): out[name][mask] = np.ma.masked else: if np.any(mask): out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit) else: out[name] = Column(data=data, name=name, unit=unit) return cls(out) info = TableInfo() class QTable(Table): """A class to represent tables of heterogeneous data. `~astropy.table.QTable` provides a class for heterogeneous tabular data which can be easily modified, for instance adding columns or new rows. The `~astropy.table.QTable` class is identical to `~astropy.table.Table` except that columns with an associated ``unit`` attribute are converted to `~astropy.units.Quantity` objects. See also: - https://docs.astropy.org/en/stable/table/ - https://docs.astropy.org/en/stable/table/mixin_columns.html Parameters ---------- data : numpy ndarray, dict, list, table-like object, optional Data to initialize table. masked : bool, optional Specify whether the table is masked. names : list, optional Specify column names. dtype : list, optional Specify column data types. meta : dict, optional Metadata associated with the table. copy : bool, optional Copy the input data. Default is True. 
rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. **kwargs : dict, optional Additional keyword args when converting table-like object. """ def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ return has_info_class(col, MixinInfo) def _convert_col_for_table(self, col): if isinstance(col, Column) and getattr(col, "unit", None) is not None: # We need to turn the column into a quantity; use subok=True to allow # Quantity subclasses identified in the unit (such as u.mag()). q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity try: qcol = q_cls(col.data, col.unit, copy=False, subok=True) except Exception as exc: warnings.warn( f"column {col.info.name} has a unit but is kept as " f"a {col.__class__.__name__} as an attempt to " f"convert it to Quantity failed with:\n{exc!r}", AstropyUserWarning, ) else: qcol.info = col.info qcol.info.indices = col.info.indices col = qcol else: col = super()._convert_col_for_table(col) return col
0be78b1f8a74b82ff19afbffd77d5ef2d03fc94231fcee54c0928630ce425bdd
# Licensed under a 3-clause BSD style license - see LICENSE.rst import astropy.config as _config from astropy.utils.compat import optional_deps from .column import Column, ColumnInfo, MaskedColumn, StringTruncateWarning __all__ = [ "BST", "Column", "ColumnGroups", "ColumnInfo", "Conf", "JSViewer", "MaskedColumn", "NdarrayMixin", "QTable", "Row", "SCEngine", "SerializedColumn", "SortedArray", "StringTruncateWarning", "Table", "TableAttribute", "TableColumns", "TableFormatter", "TableGroups", "TableMergeError", "TableReplaceWarning", "conf", "connect", "hstack", "join", "registry", "represent_mixins_as_columns", "setdiff", "unique", "vstack", "dstack", "join_skycoord", "join_distance", "PprintIncludeExclude", ] class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.table`. """ auto_colname = _config.ConfigItem( "col{0}", "The template that determines the name of a column if it cannot be " "determined. Uses new-style (format method) string formatting.", aliases=["astropy.table.column.auto_colname"], ) default_notebook_table_class = _config.ConfigItem( "table-striped table-bordered table-condensed", "The table class to be used in Jupyter notebooks when displaying " "tables (and not overridden). See <https://getbootstrap.com/css/#tables> " "for a list of useful bootstrap classes.", ) replace_warnings = _config.ConfigItem( [], "List of conditions for issuing a warning when replacing a table " "column using setitem, e.g. t['a'] = value. Allowed options are " "'always', 'slice', 'refcount', 'attributes'.", "string_list", ) replace_inplace = _config.ConfigItem( False, "Always use in-place update of a table column when using setitem, " "e.g. t['a'] = value. This overrides the default behavior of " "replacing the column entirely with the new value when possible. " "This configuration option will be deprecated and then removed in " "subsequent major releases.", ) conf = Conf() # Finally import the formats for the read and write method but delay building # the documentation until all are loaded. (#5275) from astropy.io import registry from . import connect from .bst import BST from .groups import ColumnGroups, TableGroups from .operations import ( TableMergeError, dstack, hstack, join, join_distance, join_skycoord, setdiff, unique, vstack, ) from .serialize import SerializedColumn, represent_mixins_as_columns from .soco import SCEngine from .sorted_array import SortedArray from .table import ( NdarrayMixin, PprintIncludeExclude, QTable, Row, Table, TableAttribute, TableColumns, TableFormatter, TableReplaceWarning, ) with registry.delay_doc_updates(Table): # Import routines that connect readers/writers to astropy.table import astropy.io.ascii.connect import astropy.io.fits.connect import astropy.io.misc.connect import astropy.io.misc.pandas.connect import astropy.io.votable.connect from .jsviewer import JSViewer if optional_deps.HAS_ASDF_ASTROPY: import asdf_astropy.io.connect else: import astropy.io.misc.asdf.connect
6ab54645bd0958735b8989ab44b3774a1d7c1b3560a0bb28ad1132ca78225551
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import warnings import weakref from copy import deepcopy import numpy as np from numpy import ma from astropy.units import Quantity, StructuredUnit, Unit from astropy.utils.console import color_print from astropy.utils.data_info import BaseColumnInfo, dtype_info_name from astropy.utils.metadata import MetaData from astropy.utils.misc import dtype_bytes_or_chars from . import groups, pprint # These "shims" provide __getitem__ implementations for Column and MaskedColumn from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim # Create a generic TableFormatter object for use by bare columns with no # parent table. FORMATTER = pprint.TableFormatter() class StringTruncateWarning(UserWarning): """ Warning class for when a string column is assigned a value that gets truncated because the base (numpy) string length is too short. This does not inherit from AstropyWarning because we want to use stacklevel=2 to show the user where the issue occurred in their code. """ pass # Always emit this warning, not just the first instance warnings.simplefilter("always", StringTruncateWarning) def _auto_names(n_cols): from . import conf return [str(conf.auto_colname).format(i) for i in range(n_cols)] # list of one and two-dimensional comparison functions, which sometimes return # a Column class and sometimes a plain array. Used in __array_wrap__ to ensure # they only return plain (masked) arrays (see #1446 and #1685) _comparison_functions = { np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal, np.isfinite, np.isinf, np.isnan, np.sign, np.signbit, } def col_copy(col, copy_indices=True): """ Mixin-safe version of Column.copy() (with copy_data=True). Parameters ---------- col : Column or mixin column Input column copy_indices : bool Copy the column ``indices`` attribute Returns ------- col : Copy of input column """ if isinstance(col, BaseColumn): return col.copy() newcol = col.copy() if hasattr(col, "copy") else deepcopy(col) # If the column has info defined, we copy it and adjust any indices # to point to the copied column. By guarding with the if statement, # we avoid side effects (of creating the default info instance). if "info" in col.__dict__: newcol.info = col.info if copy_indices and col.info.indices: newcol.info.indices = deepcopy(col.info.indices) for index in newcol.info.indices: index.replace_col(col, newcol) return newcol class FalseArray(np.ndarray): """ Boolean mask array that is always False. This is used to create a stub ``mask`` property which is a boolean array of ``False`` used by default for mixin columns and corresponding to the mixin column data shape. The ``mask`` looks like a normal numpy array but an exception will be raised if ``True`` is assigned to any element. The consequences of the limitation are most obvious in the high-level table operations. Parameters ---------- shape : tuple Data shape """ def __new__(cls, shape): obj = np.zeros(shape, dtype=bool).view(cls) return obj def __setitem__(self, item, val): val = np.asarray(val) if np.any(val): raise ValueError( "Cannot set any element of {} class to True".format( self.__class__.__name__ ) ) def _expand_string_array_for_values(arr, values): """ For string-dtype return a version of ``arr`` that is wide enough for ``values``. If ``arr`` is not string-dtype or does not need expansion then return ``arr``. 
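For example (illustrative dtypes), a ``U2`` array that must accommodate the string ``'abcd'`` is returned recast as ``U4``; the element values themselves are left unchanged.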
Parameters ---------- arr : np.ndarray Input array values : scalar or array-like Values for width comparison for string arrays Returns ------- arr_expanded : np.ndarray """ if arr.dtype.kind in ("U", "S") and values is not np.ma.masked: # Find the length of the longest string in the new values. values_str_len = np.char.str_len(values).max() # Determine character repeat count of arr.dtype. Returns a positive # int or None (something like 'U0' is not possible in numpy). If new values # are longer than current then make a new (wider) version of arr. arr_str_len = dtype_bytes_or_chars(arr.dtype) if arr_str_len and values_str_len > arr_str_len: arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len) arr = arr.astype(arr_dtype) return arr def _convert_sequence_data_to_array(data, dtype=None): """Convert N-d sequence-like data to ndarray or MaskedArray. This is the core function for converting Python lists or list of lists to a numpy array. This handles embedded np.ma.masked constants in ``data`` along with the special case of an homogeneous list of MaskedArray elements. Considerations: - np.ma.array is about 50 times slower than np.array for list input. This function avoids using np.ma.array on list input. - np.array emits a UserWarning for embedded np.ma.masked, but only for int or float inputs. For those it converts to np.nan and forces float dtype. For other types np.array is inconsistent, for instance converting np.ma.masked to "0.0" for str types. - Searching in pure Python for np.ma.masked in ``data`` is comparable in speed to calling ``np.array(data)``. - This function may end up making two additional copies of input ``data``. Parameters ---------- data : N-d sequence Input data, typically list or list of lists dtype : None or dtype-like Output datatype (None lets np.array choose) Returns ------- np_data : np.ndarray or np.ma.MaskedArray """ np_ma_masked = np.ma.masked # Avoid repeated lookups of this object # Special case of an homogeneous list of MaskedArray elements (see #8977). # np.ma.masked is an instance of MaskedArray, so exclude those values. if ( hasattr(data, "__len__") and len(data) > 0 and all( isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked for val in data ) ): np_data = np.ma.array(data, dtype=dtype) return np_data # First convert data to a plain ndarray. If there are instances of np.ma.masked # in the data this will issue a warning for int and float. with warnings.catch_warnings(record=True) as warns: # Ensure this warning from numpy is always enabled and that it is not # converted to an error (which can happen during pytest). warnings.filterwarnings( "always", category=UserWarning, message=".*converting a masked element.*" ) # FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291 # and https://github.com/numpy/numpy/issues/18425. warnings.filterwarnings( "always", category=FutureWarning, message=".*Promotion of numbers and bools to strings.*", ) try: np_data = np.array(data, dtype=dtype) except np.ma.MaskError: # Catches case of dtype=int with masked values, instead let it # convert to float np_data = np.array(data) except Exception: # Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity. # First try to interpret the data as Quantity. 
If that still fails then fall # through to object try: np_data = Quantity(data, dtype) except Exception: dtype = object np_data = np.array(data, dtype=dtype) if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0): # Implies input was a scalar or an empty list (e.g. initializing an # empty table with pre-declared names and dtypes but no data). Here we # need to fall through to initializing with the original data=[]. return data # If there were no warnings and the data are int or float, then we are done. # Other dtypes like string or complex can have masked values and the # np.array() conversion gives the wrong answer (e.g. converting np.ma.masked # to the string "0.0"). if len(warns) == 0 and np_data.dtype.kind in ("i", "f"): return np_data # Now we need to determine if there is an np.ma.masked anywhere in input data. # Make a statement like below to look for np.ma.masked in a nested sequence. # Because np.array(data) succeeded we know that `data` has a regular N-d # structure. Find ma_masked: # any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data) # Using this eval avoids creating a copy of `data` in the more-usual case of # no masked elements. any_statement = "d0 is ma_masked" for ii in reversed(range(np_data.ndim)): if ii == 0: any_statement = f"any({any_statement} for d0 in data)" elif ii == np_data.ndim - 1: any_statement = f"any(d{ii} is ma_masked for d{ii} in d{ii-1})" else: any_statement = f"any({any_statement} for d{ii} in d{ii-1})" context = {"ma_masked": np.ma.masked, "data": data} has_masked = eval(any_statement, context) # If there are any masks then explicitly change each one to a fill value and # set a mask boolean array. If not has_masked then we're done. if has_masked: mask = np.zeros(np_data.shape, dtype=bool) data_filled = np.array(data, dtype=object) # Make type-appropriate fill value based on initial conversion. if np_data.dtype.kind == "U": fill = "" elif np_data.dtype.kind == "S": fill = b"" else: # Zero works for every numeric type. fill = 0 ranges = [range(dim) for dim in np_data.shape] for idxs in itertools.product(*ranges): val = data_filled[idxs] if val is np_ma_masked: data_filled[idxs] = fill mask[idxs] = True elif isinstance(val, bool) and dtype is None: # If we see a bool and dtype not specified then assume bool for # the entire array. Not perfect but in most practical cases OK. # Unfortunately numpy types [False, 0] as int, not bool (and # [False, np.ma.masked] => array([0.0, np.nan])). dtype = bool # If no dtype is provided then need to convert back to list so np.array # does type autodetection. if dtype is None: data_filled = data_filled.tolist() # Use np.array first to convert `data` to ndarray (fast) and then make # masked array from an ndarray with mask (fast) instead of from `data`. np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask) return np_data def _make_compare(oper): """ Make Column comparison methods which encode the ``other`` object to utf-8 in the case of a bytestring dtype for Py3+. Parameters ---------- oper : str Operator name """ def _compare(self, other): op = oper # copy enclosed ref to allow swap below # If other is a Quantity, we should let it do the work, since # it can deal with our possible unit (which, for MaskedColumn, # would get dropped below, as '.data' is accessed in super()). if isinstance(other, Quantity): return NotImplemented # If we are unicode and other is a column with bytes, defer to it for # doing the unicode sandwich. 
This avoids problems like those # discussed in #6838 and #6899. if ( self.dtype.kind == "U" and isinstance(other, Column) and other.dtype.kind == "S" ): return NotImplemented # If we are bytes, encode other as needed. if self.dtype.char == "S": other = self._encode_str(other) # Now just let the regular ndarray.__eq__, etc., take over. result = getattr(super(Column, self), op)(other) # But we should not return Column instances for this case. return result.data if isinstance(result, Column) else result return _compare class ColumnInfo(BaseColumnInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. """ attr_names = BaseColumnInfo.attr_names | {"groups"} _attrs_no_copy = BaseColumnInfo._attrs_no_copy | {"groups"} attrs_from_parent = attr_names _supports_indexing = True # For structured columns, data is used to store a dict of columns. # Store entries in that dict as name.key instead of name.data.key. _represent_as_dict_primary_data = "data" def _represent_as_dict(self): result = super()._represent_as_dict() names = self._parent.dtype.names # For a regular column, we are done, but for a structured # column, we use a SerializedColumns to store the pieces. if names is None: return result from .serialize import SerializedColumn data = SerializedColumn() # If this column has a StructuredUnit, we split it and store # it on the corresponding part. Otherwise, we just store it # as an attribute below. All other attributes we remove from # the parts, so that we do not store them multiple times. # (Note that attributes are not linked to the parent, so it # is safe to reset them.) # TODO: deal with (some of) this in Column.__getitem__? # Alternatively: should we store info on the first part? # TODO: special-case format somehow? Can we have good formats # for structured columns? unit = self.unit if isinstance(unit, StructuredUnit) and len(unit) == len(names): units = unit.values() unit = None # No need to store as an attribute as well. else: units = [None] * len(names) for name, part_unit in zip(names, units): part = Column(self._parent[name]) part.unit = part_unit part.description = None part.meta = {} part.format = None data[name] = part # Create the attributes required to reconstruct the column. result["data"] = data # Store the shape if needed. Just like scalar data, a structured data # column (e.g. with dtype `f8,i8`) can be multidimensional within each # row and have a shape, and that needs to be distinguished from the # case that each entry in the structure has the same shape (e.g., # distinguist a column with dtype='f8,i8' and 2 elements per row from # one with dtype '2f8,2i8' and just one element per row). if shape := self._parent.shape[1:]: result["shape"] = list(shape) # Also store the standard info attributes since these are # stored on the parent and can thus just be passed on as # arguments. TODO: factor out with essentially the same # code in serialize._represent_mixin_as_column. 
if unit is not None and unit != "": result["unit"] = unit if self.format is not None: result["format"] = self.format if self.description is not None: result["description"] = self.description if self.meta: result["meta"] = self.meta return result def _construct_from_dict(self, map): if not isinstance(map.get("data"), dict): return super()._construct_from_dict(map) # Reconstruct a structured Column, by first making an empty column # and then filling it with the structured data. data = map.pop("data") shape = tuple(map.pop("shape", ())) # There are three elements in the shape of `part`: # (table length, shape of structured column, shape of part like '3f8') # The column `shape` only includes the second, so by adding one to its # length to include the table length, we pick off a possible last bit. dtype = np.dtype( [ (name, part.dtype, part.shape[len(shape) + 1 :]) for name, part in data.items() ] ) units = tuple(col.info.unit for col in data.values()) if all(unit is not None for unit in units): map["unit"] = StructuredUnit(units, dtype) map.update(dtype=dtype, shape=shape, length=len(data[dtype.names[0]])) # Construct the empty column from `map` (note: 'data' removed above). result = super()._construct_from_dict(map) # Fill it with the structured data. for name in dtype.names: result[name] = data[name] return result def new_like(self, cols, length, metadata_conflicts="warn", name=None): """ Return a new Column instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : Column (or subclass) New instance of this class consistent with ``cols`` """ attrs = self.merge_cols_attributes( cols, metadata_conflicts, name, ("meta", "unit", "format", "description") ) return self._parent_cls(length=length, **attrs) def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. For Column this is just the column itself. Returns ------- arrays : list of ndarray """ return [self._parent] class BaseColumn(_ColumnGetitemShim, np.ndarray): meta = MetaData() def __new__( cls, data=None, name=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True, ): if data is None: self_data = np.zeros((length,) + shape, dtype=dtype) elif isinstance(data, BaseColumn) and hasattr(data, "_name"): # When unpickling a MaskedColumn, ``data`` will be a bare # BaseColumn with none of the expected attributes. In this case # do NOT execute this block which initializes from ``data`` # attributes. self_data = np.array(data.data, dtype=dtype, copy=copy) if description is None: description = data.description if unit is None: unit = unit or data.unit if format is None: format = data.format if meta is None: meta = data.meta if name is None: name = data.name elif isinstance(data, Quantity): if unit is None: self_data = np.array(data, dtype=dtype, copy=copy) unit = data.unit else: self_data = Quantity(data, unit, dtype=dtype, copy=copy).value # If 'info' has been defined, copy basic properties (if needed). 
if "info" in data.__dict__: if description is None: description = data.info.description if format is None: format = data.info.format if meta is None: meta = data.info.meta else: if np.dtype(dtype).char == "S": data = cls._encode_str(data) self_data = np.array(data, dtype=dtype, copy=copy) self = self_data.view(cls) self._name = None if name is None else str(name) self._parent_table = None self.unit = unit self._format = format self.description = description self.meta = meta self.indices = deepcopy(getattr(data, "indices", [])) if copy_indices else [] for index in self.indices: index.replace_col(data, self) return self @property def data(self): return self.view(np.ndarray) @property def value(self): """ An alias for the existing ``data`` attribute. """ return self.data @property def parent_table(self): # Note: It seems there are some cases where _parent_table is not set, # such after restoring from a pickled Column. Perhaps that should be # fixed, but this is also okay for now. if getattr(self, "_parent_table", None) is None: return None else: return self._parent_table() @parent_table.setter def parent_table(self, table): if table is None: self._parent_table = None else: self._parent_table = weakref.ref(table) info = ColumnInfo() def copy(self, order="C", data=None, copy_data=True): """ Return a copy of the current instance. If ``data`` is supplied then a view (reference) of ``data`` is used, and ``copy_data`` is ignored. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of ``a`` as closely as possible. (Note that this function and :func:numpy.copy are very similar, but have different default values for their order= arguments.) Default is 'C'. data : array, optional If supplied then use a view of ``data`` instead of the instance data. This allows copying the instance attributes and meta. copy_data : bool, optional Make a copy of the internal numpy array instead of using a reference. Default is True. Returns ------- col : Column or MaskedColumn Copy of the current column (same type as original) """ if data is None: data = self.data if copy_data: data = data.copy(order) out = data.view(self.__class__) out.__array_finalize__(self) # If there is meta on the original column then deepcopy (since "copy" of column # implies complete independence from original). __array_finalize__ will have already # made a light copy. I'm not sure how to avoid that initial light copy. if self.meta is not None: out.meta = self.meta # MetaData descriptor does a deepcopy here # for MaskedColumn, MaskedArray.__array_finalize__ also copies mask # from self, which is not the idea here, so undo if isinstance(self, MaskedColumn): out._mask = data._mask self._copy_groups(out) return out def __setstate__(self, state): """ Restore the internal state of the Column/MaskedColumn for pickling purposes. This requires that the last element of ``state`` is a 5-tuple that has Column-specific state values. """ # Get the Column attributes names = ("_name", "_unit", "_format", "description", "meta", "indices") attrs = {name: val for name, val in zip(names, state[-1])} state = state[:-1] # Using super().__setstate__(state) gives # "TypeError 'int' object is not iterable", raised in # astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__() # Previously, it seems to have given an infinite recursion. 
# Hence, manually call the right super class to actually set up # the array object. super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray super_class.__setstate__(self, state) # Set the Column attributes for name, val in attrs.items(): setattr(self, name, val) self._parent_table = None def __reduce__(self): """ Return a 3-tuple for pickling a Column. Use the super-class functionality but then add in a 5-tuple of Column-specific values that get used in __setstate__. """ super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self) # Define Column-specific attrs and meta that gets added to state. column_state = ( self.name, self.unit, self.format, self.description, self.meta, self.indices, ) state = state + (column_state,) return reconstruct_func, reconstruct_func_args, state def __array_finalize__(self, obj): # Obj will be none for direct call to Column() creator if obj is None: return if callable(super().__array_finalize__): super().__array_finalize__(obj) # Self was created from template (e.g. obj[slice] or (obj * 2)) # or viewcast e.g. obj.view(Column). In either case we want to # init Column attributes for self from obj if possible. self.parent_table = None if not hasattr(self, "indices"): # may have been copied in __new__ self.indices = [] self._copy_attrs(obj) if "info" in getattr(obj, "__dict__", {}): self.info = obj.info def __array_wrap__(self, out_arr, context=None): """ __array_wrap__ is called at the end of every ufunc. Normally, we want a Column object back and do not have to do anything special. But there are two exceptions: 1) If the output shape is different (e.g. for reduction ufuncs like sum() or mean()), a Column still linking to a parent_table makes little sense, so we return the output viewed as the column content (ndarray or MaskedArray). For this case, we use "[()]" to select everything, and to ensure we convert a zero rank array to a scalar. (For some reason np.sum() returns a zero rank scalar array while np.mean() returns a scalar; So the [()] is needed for this case. 2) When the output is created by any function that returns a boolean we also want to consistently return an array rather than a column (see #1446 and #1685) """ out_arr = super().__array_wrap__(out_arr, context) if self.shape != out_arr.shape or ( isinstance(out_arr, BaseColumn) and (context is not None and context[0] in _comparison_functions) ): return out_arr.data[()] else: return out_arr @property def name(self): """ The name of this column. """ return self._name @name.setter def name(self, val): if val is not None: val = str(val) if self.parent_table is not None: table = self.parent_table table.columns._rename_column(self.name, val) self._name = val @property def format(self): """ Format string for displaying values in this column. """ return self._format @format.setter def format(self, format_string): prev_format = getattr(self, "_format", None) self._format = format_string # set new format string try: # test whether it formats without error exemplarily self.pformat(max_lines=1) except Exception as err: # revert to restore previous format if there was one self._format = prev_format raise ValueError( "Invalid format for column '{}': could not display " "values in this column using this format".format(self.name) ) from err @property def descr(self): """Array-interface compliant full description of the column. 
This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition. """ return (self.name, self.dtype.str, self.shape[1:]) def iter_str_vals(self): """ Return an iterator that yields the string-formatted values of this column. Returns ------- str_vals : iterator Column values formatted as strings """ # Iterate over formatted values with no max number of lines, no column # name, no unit, and ignoring the returned header info in outs. _pformat_col_iter = self._formatter._pformat_col_iter yield from _pformat_col_iter( self, -1, show_name=False, show_unit=False, show_dtype=False, outs={} ) def attrs_equal(self, col): """Compare the column attributes of ``col`` to this object. The comparison attributes are: ``name``, ``unit``, ``dtype``, ``format``, ``description``, and ``meta``. Parameters ---------- col : Column Comparison column Returns ------- equal : bool True if all attributes are equal """ if not isinstance(col, BaseColumn): raise ValueError("Comparison `col` must be a Column or MaskedColumn object") attrs = ("name", "unit", "dtype", "format", "description", "meta") equal = all(getattr(self, x) == getattr(col, x) for x in attrs) return equal @property def _formatter(self): return FORMATTER if (self.parent_table is None) else self.parent_table.formatter def pformat( self, max_lines=None, show_name=True, show_unit=False, show_dtype=False, html=False, ): """Return a list of formatted string representation of column values. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default will be determined using the ``astropy.conf.max_lines`` configuration item. If a negative value of ``max_lines`` is supplied then there is no line limit applied. Parameters ---------- max_lines : int Maximum lines of output (header + data rows) show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is False. show_dtype : bool Include column dtype. Default is False. html : bool Format the output as an HTML table. Default is False. Returns ------- lines : list List of lines with header and formatted column values """ _pformat_col = self._formatter._pformat_col lines, outs = _pformat_col( self, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, html=html, ) return lines def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False): """Print a formatted string representation of column values. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default will be determined using the ``astropy.conf.max_lines`` configuration item. If a negative value of ``max_lines`` is supplied then there is no line limit applied. Parameters ---------- max_lines : int Maximum number of values in output show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is False. show_dtype : bool Include column dtype. Default is True. """ _pformat_col = self._formatter._pformat_col lines, outs = _pformat_col( self, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, ) n_header = outs["n_header"] for i, line in enumerate(lines): if i < n_header: color_print(line, "red") else: print(line) def more(self, max_lines=None, show_name=True, show_unit=False): """Interactively browse column with a paging interface. 
Supported keys:: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help Parameters ---------- max_lines : int Maximum number of lines in table output. show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is False. """ _more_tabcol = self._formatter._more_tabcol _more_tabcol( self, max_lines=max_lines, show_name=show_name, show_unit=show_unit ) @property def unit(self): """ The unit associated with this column. May be a string or a `astropy.units.UnitBase` instance. Setting the ``unit`` property does not change the values of the data. To perform a unit conversion, use ``convert_unit_to``. """ return self._unit @unit.setter def unit(self, unit): if unit is None: self._unit = None else: self._unit = Unit(unit, parse_strict="silent") @unit.deleter def unit(self): self._unit = None def searchsorted(self, v, side="left", sorter=None): # For bytes type data, encode the `v` value as UTF-8 (if necessary) before # calling searchsorted. This prevents a factor of 1000 slowdown in # searchsorted in this case. a = self.data if a.dtype.kind == "S" and not isinstance(v, bytes): v = np.asarray(v) if v.dtype.kind == "U": v = np.char.encode(v, "utf-8") return np.searchsorted(a, v, side=side, sorter=sorter) searchsorted.__doc__ = np.ndarray.searchsorted.__doc__ def convert_unit_to(self, new_unit, equivalencies=[]): """ Converts the values of the column in-place from the current unit to the given unit. To change the unit associated with this column without actually changing the data values, simply set the ``unit`` property. Parameters ---------- new_unit : str or `astropy.units.UnitBase` instance The unit to convert to. equivalencies : list of tuple A list of equivalence pairs to try if the unit are not directly convertible. See :ref:`astropy:unit_equivalencies`. Raises ------ astropy.units.UnitsError If units are inconsistent """ if self.unit is None: raise ValueError("No unit set on column") self.data[:] = self.unit.to(new_unit, self.data, equivalencies=equivalencies) self.unit = new_unit @property def groups(self): if not hasattr(self, "_groups"): self._groups = groups.ColumnGroups(self) return self._groups def group_by(self, keys): """ Group this column by the specified ``keys`` This effectively splits the column into groups which correspond to unique values of the ``keys`` grouping object. The output is a new `Column` or `MaskedColumn` which contains a copy of this column but sorted by row according to ``keys``. The ``keys`` input to ``group_by`` must be a numpy array with the same length as this column. Parameters ---------- keys : numpy array Key grouping object Returns ------- out : Column New column with groups attribute set accordingly """ return groups.column_group_by(self, keys) def _copy_groups(self, out): """ Copy current groups into a copy of self ``out`` """ if self.parent_table: if hasattr(self.parent_table, "_groups"): out._groups = groups.ColumnGroups( out, indices=self.parent_table._groups._indices ) elif hasattr(self, "_groups"): out._groups = groups.ColumnGroups(out, indices=self._groups._indices) # Strip off the BaseColumn-ness for repr and str so that # MaskedColumn.data __repr__ does not include masked_BaseColumn(data = # [1 2], ...). 
def __repr__(self): return np.asarray(self).__repr__() @property def quantity(self): """ A view of this table column as a `~astropy.units.Quantity` object with units given by the Column's `unit` parameter. """ # the Quantity initializer is used here because it correctly fails # if the column's values are non-numeric (like strings), while .view # will happily return a quantity with gibberish for numerical values return Quantity( self, self.unit, copy=False, dtype=self.dtype, order="A", subok=True ) def to(self, unit, equivalencies=[], **kwargs): """ Converts this table column to a `~astropy.units.Quantity` object with the requested units. Parameters ---------- unit : unit-like The unit to convert to (i.e., a valid argument to the :meth:`astropy.units.Quantity.to` method). equivalencies : list of tuple Equivalencies to use for this conversion. See :meth:`astropy.units.Quantity.to` for more details. Returns ------- quantity : `~astropy.units.Quantity` A quantity object with the contents of this column in the units ``unit``. """ return self.quantity.to(unit, equivalencies) def _copy_attrs(self, obj): """ Copy key column attributes from ``obj`` to self """ for attr in ("name", "unit", "_format", "description"): val = getattr(obj, attr, None) setattr(self, attr, val) # Light copy of meta if it is not empty obj_meta = getattr(obj, "meta", None) if obj_meta: self.meta = obj_meta.copy() @staticmethod def _encode_str(value): """ Encode anything that is unicode-ish as utf-8. This method is only called for Py3+. """ if isinstance(value, str): value = value.encode("utf-8") elif isinstance(value, bytes) or value is np.ma.masked: pass else: arr = np.asarray(value) if arr.dtype.char == "U": arr = np.char.encode(arr, encoding="utf-8") if isinstance(value, np.ma.MaskedArray): arr = np.ma.array(arr, mask=value.mask, copy=False) value = arr return value def tolist(self): if self.dtype.kind == "S": return np.chararray.decode(self, encoding="utf-8").tolist() else: return super().tolist() class Column(BaseColumn): """Define a data column for use in a Table object. Parameters ---------- data : list, ndarray, or None Column data values name : str Column name and key for reference within Table dtype : `~numpy.dtype`-like Data type for column shape : tuple or () Dimensions of a single row element in the column data length : int or 0 Number of row elements in column data description : str or None Full description of column unit : str or None Physical unit format : str, None, or callable Format string for outputting column values. This can be an "old-style" (``format % value``) or "new-style" (`str.format`) format specification string or a function or any callable object that accepts a single value and returns a string. meta : dict-like or None Meta-data associated with the column Examples -------- A Column can be created in two different ways: - Provide a ``data`` value but not ``shape`` or ``length`` (which are inferred from the data). Examples:: col = Column(data=[1, 2], name='name') # shape=(2,) col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2) col = Column(data=[1, 2], name='name', dtype=float) col = Column(data=np.array([1, 2]), name='name') col = Column(data=['hello', 'world'], name='name') The ``dtype`` argument can be any value which is an acceptable fixed-size data-type initializer for the numpy.dtype() method. See `<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_. Examples include: - Python non-string type (float, int, bool) - Numpy non-string type (e.g. 
np.float32, np.int64, np.bool\\_) - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15') If no ``dtype`` value is provide then the type is inferred using ``np.array(data)``. - Provide ``length`` and optionally ``shape``, but not ``data`` Examples:: col = Column(name='name', length=5) col = Column(name='name', dtype=int, length=10, shape=(3,4)) The default ``dtype`` is ``np.float64``. The ``shape`` argument is the array shape of a single cell in the column. To access the ``Column`` data as a raw `numpy.ndarray` object, you can use one of the ``data`` or ``value`` attributes (which are equivalent):: col.data col.value """ def __new__( cls, data=None, name=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True, ): if isinstance(data, MaskedColumn) and np.any(data.mask): raise TypeError( "Cannot convert a MaskedColumn with masked value to a Column" ) self = super().__new__( cls, data=data, name=name, dtype=dtype, shape=shape, length=length, description=description, unit=unit, format=format, meta=meta, copy=copy, copy_indices=copy_indices, ) return self def __setattr__(self, item, value): if not isinstance(self, MaskedColumn) and item == "mask": raise AttributeError( "cannot set mask value to a column in non-masked Table" ) super().__setattr__(item, value) if item == "unit" and issubclass(self.dtype.type, np.number): try: converted = self.parent_table._convert_col_for_table(self) except AttributeError: # Either no parent table or parent table is None pass else: if converted is not self: self.parent_table.replace_column(self.name, converted) def _base_repr_(self, html=False): # If scalar then just convert to correct numpy type and use numpy repr if self.ndim == 0: return repr(self.item()) descr_vals = [self.__class__.__name__] unit = None if self.unit is None else str(self.unit) shape = None if self.ndim <= 1 else self.shape[1:] for attr, val in ( ("name", self.name), ("dtype", dtype_info_name(self.dtype)), ("shape", shape), ("unit", unit), ("format", self.format), ("description", self.description), ("length", len(self)), ): if val is not None: descr_vals.append(f"{attr}={val!r}") descr = "<" + " ".join(descr_vals) + ">\n" if html: from astropy.utils.xml.writer import xml_escape descr = xml_escape(descr) data_lines, outs = self._formatter._pformat_col( self, show_name=False, show_unit=False, show_length=False, html=html ) out = descr + "\n".join(data_lines) return out def _repr_html_(self): return self._base_repr_(html=True) def __repr__(self): return self._base_repr_(html=False) def __str__(self): # If scalar then just convert to correct numpy type and use numpy repr if self.ndim == 0: return str(self.item()) lines, outs = self._formatter._pformat_col(self) return "\n".join(lines) def __bytes__(self): return str(self).encode("utf-8") def _check_string_truncate(self, value): """ Emit a warning if any elements of ``value`` will be truncated when ``value`` is assigned to self. """ # Convert input ``value`` to the string dtype of this column and # find the length of the longest string in the array. value = np.asanyarray(value, dtype=self.dtype.type) if value.size == 0: return value_str_len = np.char.str_len(value).max() # Parse the array-protocol typestring (e.g. '|U15') of self.dtype which # has the character repeat count on the right side. 
self_str_len = dtype_bytes_or_chars(self.dtype) if value_str_len > self_str_len: warnings.warn( "truncated right side string(s) longer than {} " "character(s) during assignment".format(self_str_len), StringTruncateWarning, stacklevel=3, ) def __setitem__(self, index, value): if self.dtype.char == "S": value = self._encode_str(value) # Issue warning for string assignment that truncates ``value`` if issubclass(self.dtype.type, np.character): self._check_string_truncate(value) # update indices self.info.adjust_indices(index, value, len(self)) # Set items using a view of the underlying data, as it gives an # order-of-magnitude speed-up. [#2994] self.data[index] = value __eq__ = _make_compare("__eq__") __ne__ = _make_compare("__ne__") __gt__ = _make_compare("__gt__") __lt__ = _make_compare("__lt__") __ge__ = _make_compare("__ge__") __le__ = _make_compare("__le__") def insert(self, obj, values, axis=0): """ Insert values before the given indices in the column and return a new `~astropy.table.Column` object. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of the column, ``values`` is converted to the matching type. ``values`` should be shaped so that it can be broadcast appropriately. axis : int, optional Axis along which to insert ``values``. If ``axis`` is None then the column array is flattened before insertion. Default is 0, which will insert a row. Returns ------- out : `~astropy.table.Column` A copy of column with ``values`` and ``mask`` inserted. Note that the insertion does not occur in-place: a new column is returned. """ if self.dtype.kind == "O": # Even if values is array-like (e.g. [1,2,3]), insert as a single # object. Numpy.insert instead inserts each element in an array-like # input individually. data = np.insert(self, obj, None, axis=axis) data[obj] = values else: self_for_insert = _expand_string_array_for_values(self, values) data = np.insert(self_for_insert, obj, values, axis=axis) out = data.view(self.__class__) out.__array_finalize__(self) return out # We do this to make the methods show up in the API docs name = BaseColumn.name unit = BaseColumn.unit copy = BaseColumn.copy more = BaseColumn.more pprint = BaseColumn.pprint pformat = BaseColumn.pformat convert_unit_to = BaseColumn.convert_unit_to quantity = BaseColumn.quantity to = BaseColumn.to class MaskedColumnInfo(ColumnInfo): """ Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. In this case it just adds the ``mask_val`` attribute. """ # Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows # about. This allows customization of the way that MaskedColumn objects # get written to file depending on format. The default is to use whatever # the writer would normally do, which in the case of FITS or ECSV is to use # a NULL value within the data itself. If serialize_method is 'data_mask' # then the mask is explicitly written out as a separate column if there # are any masked values. See also code below. attr_names = ColumnInfo.attr_names | {"serialize_method"} # When `serialize_method` is 'data_mask', and data and mask are being written # as separate columns, use column names <name> and <name>.mask (instead # of default encoding as <name>.data and <name>.mask). 
_represent_as_dict_primary_data = "data" mask_val = np.ma.masked def __init__(self, bound=False): super().__init__(bound) # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. if bound: # Specify how to serialize this object depending on context. self.serialize_method = { "fits": "null_value", "ecsv": "null_value", "hdf5": "data_mask", "parquet": "data_mask", None: "null_value", } def _represent_as_dict(self): out = super()._represent_as_dict() # If we are a structured masked column, then our parent class, # ColumnInfo, will already have set up a dict with masked parts, # which will be serialized later, so no further work needed here. if self._parent.dtype.names is not None: return out col = self._parent # If the serialize method for this context (e.g. 'fits' or 'ecsv') is # 'data_mask', that means to serialize using an explicit mask column. method = self.serialize_method[self._serialize_context] if method == "data_mask": # Note: a driver here is a performance issue in #8443 where repr() of a # np.ma.MaskedArray value is up to 10 times slower than repr of a normal array # value. So regardless of whether there are masked elements it is useful to # explicitly define this as a serialized column and use col.data.data (ndarray) # instead of letting it fall through to the "standard" serialization machinery. out["data"] = col.data.data if np.any(col.mask): # Only if there are actually masked elements do we add the ``mask`` column out["mask"] = col.mask elif method == "null_value": pass else: raise ValueError( 'serialize method must be either "data_mask" or "null_value"' ) return out class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray): """Define a masked data column for use in a Table object. Parameters ---------- data : list, ndarray, or None Column data values name : str Column name and key for reference within Table mask : list, ndarray or None Boolean mask for which True indicates missing or invalid data fill_value : float, int, str, or None Value used when filling masked column elements dtype : `~numpy.dtype`-like Data type for column shape : tuple or () Dimensions of a single row element in the column data length : int or 0 Number of row elements in column data description : str or None Full description of column unit : str or None Physical unit format : str, None, or callable Format string for outputting column values. This can be an "old-style" (``format % value``) or "new-style" (`str.format`) format specification string or a function or any callable object that accepts a single value and returns a string. meta : dict-like or None Meta-data associated with the column Examples -------- A MaskedColumn is similar to a Column except that it includes ``mask`` and ``fill_value`` attributes. It can be created in two different ways: - Provide a ``data`` value but not ``shape`` or ``length`` (which are inferred from the data). Examples:: col = MaskedColumn(data=[1, 2], name='name') col = MaskedColumn(data=[1, 2], name='name', mask=[True, False]) col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99) The ``mask`` argument will be cast as a boolean array and specifies which elements are considered to be missing or invalid. The ``dtype`` argument can be any value which is an acceptable fixed-size data-type initializer for the numpy.dtype() method. See `<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_. Examples include: - Python non-string type (float, int, bool) - Numpy non-string type (e.g. 
np.float32, np.int64, np.bool\\_) - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15') If no ``dtype`` value is provide then the type is inferred using ``np.array(data)``. When ``data`` is provided then the ``shape`` and ``length`` arguments are ignored. - Provide ``length`` and optionally ``shape``, but not ``data`` Examples:: col = MaskedColumn(name='name', length=5) col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4)) The default ``dtype`` is ``np.float64``. The ``shape`` argument is the array shape of a single cell in the column. To access the ``Column`` data as a raw `numpy.ma.MaskedArray` object, you can use one of the ``data`` or ``value`` attributes (which are equivalent):: col.data col.value """ info = MaskedColumnInfo() def __new__( cls, data=None, name=None, mask=None, fill_value=None, dtype=None, shape=(), length=0, description=None, unit=None, format=None, meta=None, copy=False, copy_indices=True, ): if mask is None: # If mask is None then we need to determine the mask (if any) from the data. # The naive method is looking for a mask attribute on data, but this can fail, # see #8816. Instead use ``MaskedArray`` to do the work. mask = ma.MaskedArray(data).mask if mask is np.ma.nomask: # Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below. mask = False elif copy: mask = mask.copy() elif mask is np.ma.nomask: # Force the creation of a full mask array as nomask is tricky to # use and will fail in an unexpected manner when setting a value # to the mask. mask = False else: mask = deepcopy(mask) # Create self using MaskedArray as a wrapper class, following the example of # class MSubArray in # https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py # This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and # https://github.com/astropy/astropy/commit/ff6039e8) # First just pass through all args and kwargs to BaseColumn, then wrap that object # with MaskedArray. self_data = BaseColumn( data, dtype=dtype, shape=shape, length=length, name=name, unit=unit, format=format, description=description, meta=meta, copy=copy, copy_indices=copy_indices, ) self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask) # The above process preserves info relevant for Column, but this does # not include serialize_method (and possibly other future attributes) # relevant for MaskedColumn, so we set info explicitly. if "info" in getattr(data, "__dict__", {}): self.info = data.info # Note: do not set fill_value in the MaskedArray constructor because this does not # go through the fill_value workarounds. if fill_value is None: data_fill_value = getattr(data, "fill_value", None) if ( data_fill_value is not None and data_fill_value != np.ma.default_fill_value(data.dtype) ): fill_value = np.array(data_fill_value, self.dtype)[()] self.fill_value = fill_value self.parent_table = None # needs to be done here since self doesn't come from BaseColumn.__new__ for index in self.indices: index.replace_col(self_data, self) return self @property def fill_value(self): return self.get_fill_value() # defer to native ma.MaskedArray method @fill_value.setter def fill_value(self, val): """Set fill value both in the masked column view and in the parent table if it exists. Setting one or the other alone doesn't work.""" # another ma bug workaround: If the value of fill_value for a string array is # requested but not yet set then it gets created as 'N/A'. 
From this point onward # any new fill_values are truncated to 3 characters. Note that this does not # occur if the masked array is a structured array (as in the previous block that # deals with the parent table). # # >>> x = ma.array(['xxxx']) # >>> x.fill_value # fill_value now gets represented as an 'S3' array # 'N/A' # >>> x.fill_value='yyyy' # >>> x.fill_value # 'yyy' # # To handle this we are forced to reset a private variable first: self._fill_value = None self.set_fill_value(val) # defer to native ma.MaskedArray method @property def data(self): """The plain MaskedArray data held by this column.""" out = self.view(np.ma.MaskedArray) # By default, a MaskedArray view will set the _baseclass to be the # same as that of our own class, i.e., BaseColumn. Since we want # to return a plain MaskedArray, we reset the baseclass accordingly. out._baseclass = np.ndarray return out def filled(self, fill_value=None): """Return a copy of self, with masked values filled with a given value. Parameters ---------- fill_value : scalar; optional The value to use for invalid entries (`None` by default). If `None`, the ``fill_value`` attribute of the array is used instead. Returns ------- filled_column : Column A copy of ``self`` with masked entries replaced by `fill_value` (be it the function argument or the attribute of ``self``). """ if fill_value is None: fill_value = self.fill_value data = super().filled(fill_value) # Use parent table definition of Column if available column_cls = ( self.parent_table.Column if (self.parent_table is not None) else Column ) out = column_cls( name=self.name, data=data, unit=self.unit, format=self.format, description=self.description, meta=deepcopy(self.meta), ) return out def insert(self, obj, values, mask=None, axis=0): """ Insert values along the given axis before the given indices and return a new `~astropy.table.MaskedColumn` object. Parameters ---------- obj : int, slice or sequence of int Object that defines the index or indices before which ``values`` is inserted. values : array-like Value(s) to insert. If the type of ``values`` is different from that of the column, ``values`` is converted to the matching type. ``values`` should be shaped so that it can be broadcast appropriately. mask : bool or array-like Mask value(s) to insert. If not supplied, and values does not have a mask either, then False is used. axis : int, optional Axis along which to insert ``values``. If ``axis`` is None then the column array is flattened before insertion. Default is 0, which will insert a row. Returns ------- out : `~astropy.table.MaskedColumn` A copy of column with ``values`` and ``mask`` inserted. Note that the insertion does not occur in-place: a new masked column is returned. """ self_ma = self.data # self viewed as MaskedArray if self.dtype.kind == "O": # Even if values is array-like (e.g. [1,2,3]), insert as a single # object. Numpy.insert instead inserts each element in an array-like # input individually. 
new_data = np.insert(self_ma.data, obj, None, axis=axis) new_data[obj] = values else: self_ma = _expand_string_array_for_values(self_ma, values) new_data = np.insert(self_ma.data, obj, values, axis=axis) if mask is None: mask = getattr(values, "mask", np.ma.nomask) if mask is np.ma.nomask: if self.dtype.kind == "O": mask = False else: mask = np.zeros(np.shape(values), dtype=bool) new_mask = np.insert(self_ma.mask, obj, mask, axis=axis) new_ma = np.ma.array(new_data, mask=new_mask, copy=False) out = new_ma.view(self.__class__) out.parent_table = None out.indices = [] out._copy_attrs(self) out.fill_value = self.fill_value return out def _copy_attrs_slice(self, out): # Fixes issue #3023: when calling getitem with a MaskedArray subclass # the original object attributes are not copied. if out.__class__ is self.__class__: # TODO: this part is essentially the same as what is done in # __array_finalize__ and could probably be called directly in our # override of __getitem__ in _columns_mixins.pyx). Refactor? if "info" in self.__dict__: out.info = self.info out.parent_table = None # we need this because __getitem__ does a shallow copy of indices if out.indices is self.indices: out.indices = [] out._copy_attrs(self) return out def __setitem__(self, index, value): # Issue warning for string assignment that truncates ``value`` if self.dtype.char == "S": value = self._encode_str(value) if issubclass(self.dtype.type, np.character): # Account for a bug in np.ma.MaskedArray setitem. # https://github.com/numpy/numpy/issues/8624 value = np.ma.asanyarray(value, dtype=self.dtype.type) # Check for string truncation after filling masked items with # empty (zero-length) string. Note that filled() does not make # a copy if there are no masked items. self._check_string_truncate(value.filled("")) # update indices self.info.adjust_indices(index, value, len(self)) ma.MaskedArray.__setitem__(self, index, value) # We do this to make the methods show up in the API docs name = BaseColumn.name copy = BaseColumn.copy more = BaseColumn.more pprint = BaseColumn.pprint pformat = BaseColumn.pformat convert_unit_to = BaseColumn.convert_unit_to
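# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original astropy
# source above): a minimal example of the Column / MaskedColumn behaviour
# implemented in this module, assuming the public classes are importable from
# astropy.table. The column names and values are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from astropy.table import Column, MaskedColumn

    # A plain column with a unit and a new-style format string.
    flux = Column(data=[1.0, 2.5, 3.75], name="flux", unit="Jy", format="{:.2f}")
    flux.convert_unit_to("mJy")          # scales the data in place; unit becomes mJy
    print(flux.pformat(show_unit=True))  # formatted rows, including the unit row

    # A masked column: filled() replaces masked entries with the fill value.
    counts = MaskedColumn(
        data=[1, 2, 3], name="counts", mask=[False, True, False], fill_value=-99
    )
    print(counts.filled())               # Column with values [1, -99, 3]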
# Licensed under a 3-clause BSD style license - see LICENSE.rst import operator __all__ = ["BST"] class MaxValue: """ Represents an infinite value for purposes of tuple comparison. """ def __gt__(self, other): return True def __ge__(self, other): return True def __lt__(self, other): return False def __le__(self, other): return False def __repr__(self): return "MAX" __str__ = __repr__ class MinValue: """ The opposite of MaxValue, i.e. a representation of negative infinity. """ def __lt__(self, other): return True def __le__(self, other): return True def __gt__(self, other): return False def __ge__(self, other): return False def __repr__(self): return "MIN" __str__ = __repr__ class Epsilon: """ Represents the "next largest" version of a given value, so that for all valid comparisons we have x < y < Epsilon(y) < z whenever x < y < z and x, z are not Epsilon objects. Parameters ---------- val : object Original value """ __slots__ = ("val",) def __init__(self, val): self.val = val def __lt__(self, other): if self.val == other: return False return self.val < other def __gt__(self, other): if self.val == other: return True return self.val > other def __eq__(self, other): return False def __repr__(self): return repr(self.val) + " + epsilon" class Node: """ An element in a binary search tree, containing a key, data, and references to children nodes and a parent node. Parameters ---------- key : tuple Node key data : list or int Node data """ __lt__ = lambda x, y: x.key < y.key __le__ = lambda x, y: x.key <= y.key __eq__ = lambda x, y: x.key == y.key __ge__ = lambda x, y: x.key >= y.key __gt__ = lambda x, y: x.key > y.key __ne__ = lambda x, y: x.key != y.key __slots__ = ("key", "data", "left", "right") # each node has a key and data list def __init__(self, key, data): self.key = key self.data = data if isinstance(data, list) else [data] self.left = None self.right = None def replace(self, child, new_child): """ Replace this node's child with a new child. """ if self.left is not None and self.left == child: self.left = new_child elif self.right is not None and self.right == child: self.right = new_child else: raise ValueError("Cannot call replace() on non-child") def remove(self, child): """ Remove the given child. """ self.replace(child, None) def set(self, other): """ Copy the given node. """ self.key = other.key self.data = other.data[:] def __str__(self): return str((self.key, self.data)) def __repr__(self): return str(self) class BST: """ A basic binary search tree in pure Python, used as an engine for indexing. Parameters ---------- data : Table Sorted columns of the original table row_index : Column object Row numbers corresponding to data columns unique : bool Whether the values of the index must be unique. Defaults to False. """ NodeClass = Node def __init__(self, data, row_index, unique=False): self.root = None self.size = 0 self.unique = unique for key, row in zip(data, row_index): self.add(tuple(key), row) def add(self, key, data=None): """ Add a key, data pair. 
""" if data is None: data = key self.size += 1 node = self.NodeClass(key, data) curr_node = self.root if curr_node is None: self.root = node return while True: if node < curr_node: if curr_node.left is None: curr_node.left = node break curr_node = curr_node.left elif node > curr_node: if curr_node.right is None: curr_node.right = node break curr_node = curr_node.right elif self.unique: raise ValueError("Cannot insert non-unique value") else: # add data to node curr_node.data.extend(node.data) curr_node.data = sorted(curr_node.data) return def find(self, key): """ Return all data values corresponding to a given key. Parameters ---------- key : tuple Input key Returns ------- data_vals : list List of rows corresponding to the input key """ node, parent = self.find_node(key) return node.data if node is not None else [] def find_node(self, key): """ Find the node associated with the given key. """ if self.root is None: return (None, None) return self._find_recursive(key, self.root, None) def shift_left(self, row): """ Decrement all rows larger than the given row. """ for node in self.traverse(): node.data = [x - 1 if x > row else x for x in node.data] def shift_right(self, row): """ Increment all rows greater than or equal to the given row. """ for node in self.traverse(): node.data = [x + 1 if x >= row else x for x in node.data] def _find_recursive(self, key, node, parent): try: if key == node.key: return (node, parent) elif key > node.key: if node.right is None: return (None, None) return self._find_recursive(key, node.right, node) else: if node.left is None: return (None, None) return self._find_recursive(key, node.left, node) except TypeError: # wrong key type return (None, None) def traverse(self, order="inorder"): """ Return nodes of the BST in the given order. Parameters ---------- order : str The order in which to recursively search the BST. Possible values are: "preorder": current node, left subtree, right subtree "inorder": left subtree, current node, right subtree "postorder": left subtree, right subtree, current node """ if order == "preorder": return self._preorder(self.root, []) elif order == "inorder": return self._inorder(self.root, []) elif order == "postorder": return self._postorder(self.root, []) raise ValueError(f'Invalid traversal method: "{order}"') def items(self): """ Return BST items in order as (key, data) pairs. """ return [(x.key, x.data) for x in self.traverse()] def sort(self): """ Make row order align with key order. """ i = 0 for node in self.traverse(): num_rows = len(node.data) node.data = [x for x in range(i, i + num_rows)] i += num_rows def sorted_data(self): """ Return BST rows sorted by key values. """ return [x for node in self.traverse() for x in node.data] def _preorder(self, node, lst): if node is None: return lst lst.append(node) self._preorder(node.left, lst) self._preorder(node.right, lst) return lst def _inorder(self, node, lst): if node is None: return lst self._inorder(node.left, lst) lst.append(node) self._inorder(node.right, lst) return lst def _postorder(self, node, lst): if node is None: return lst self._postorder(node.left, lst) self._postorder(node.right, lst) lst.append(node) return lst def _substitute(self, node, parent, new_node): if node is self.root: self.root = new_node else: parent.replace(node, new_node) def remove(self, key, data=None): """ Remove data corresponding to the given key. Parameters ---------- key : tuple The key to remove data : int or None If None, remove the node corresponding to the given key. 
If not None, remove only the given data value from the node. Returns ------- successful : bool True if removal was successful, false otherwise """ node, parent = self.find_node(key) if node is None: return False if data is not None: if data not in node.data: raise ValueError("Data does not belong to correct node") elif len(node.data) > 1: node.data.remove(data) return True if node.left is None and node.right is None: self._substitute(node, parent, None) elif node.left is None and node.right is not None: self._substitute(node, parent, node.right) elif node.right is None and node.left is not None: self._substitute(node, parent, node.left) else: # find largest element of left subtree curr_node = node.left parent = node while curr_node.right is not None: parent = curr_node curr_node = curr_node.right self._substitute(curr_node, parent, curr_node.left) node.set(curr_node) self.size -= 1 return True def is_valid(self): """ Returns whether this is a valid BST. """ return self._is_valid(self.root) def _is_valid(self, node): if node is None: return True return ( (node.left is None or node.left <= node) and (node.right is None or node.right >= node) and self._is_valid(node.left) and self._is_valid(node.right) ) def range(self, lower, upper, bounds=(True, True)): """ Return all nodes with keys in the given range. Parameters ---------- lower : tuple Lower bound upper : tuple Upper bound bounds : (2,) tuple of bool Indicates whether the search should be inclusive or exclusive with respect to the endpoints. The first argument corresponds to an inclusive lower bound, and the second argument to an inclusive upper bound. """ nodes = self.range_nodes(lower, upper, bounds) return [x for node in nodes for x in node.data] def range_nodes(self, lower, upper, bounds=(True, True)): """ Return nodes in the given range. """ if self.root is None: return [] # op1 is <= or <, op2 is >= or > op1 = operator.le if bounds[0] else operator.lt op2 = operator.ge if bounds[1] else operator.gt return self._range(lower, upper, op1, op2, self.root, []) def same_prefix(self, val): """ Assuming the given value has smaller length than keys, return nodes whose keys have this value as a prefix. """ if self.root is None: return [] nodes = self._same_prefix(val, self.root, []) return [x for node in nodes for x in node.data] def _range(self, lower, upper, op1, op2, node, lst): if op1(lower, node.key) and op2(upper, node.key): lst.append(node) if upper > node.key and node.right is not None: self._range(lower, upper, op1, op2, node.right, lst) if lower < node.key and node.left is not None: self._range(lower, upper, op1, op2, node.left, lst) return lst def _same_prefix(self, val, node, lst): prefix = node.key[: len(val)] if prefix == val: lst.append(node) if prefix <= val and node.right is not None: self._same_prefix(val, node.right, lst) if prefix >= val and node.left is not None: self._same_prefix(val, node.left, lst) return lst def __repr__(self): return f"<{self.__class__.__name__}>" def _print(self, node, level): line = "\t" * level + str(node) + "\n" if node.left is not None: line += self._print(node.left, level + 1) if node.right is not None: line += self._print(node.right, level + 1) return line @property def height(self): """ Return the BST height. """ return self._height(self.root) def _height(self, node): if node is None: return -1 return max(self._height(node.left), self._height(node.right)) + 1 def replace_rows(self, row_map): """ Replace all rows with the values they map to in the given dictionary. 
        Any rows not present as keys in the dictionary will have
        their nodes deleted.

        Parameters
        ----------
        row_map : dict
            Mapping of row numbers to new row numbers
        """
        for key, data in self.items():
            data[:] = [row_map[x] for x in data if x in row_map]
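# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original astropy
# source above): a tiny demonstration of the BST index engine. In practice a
# BST is built by astropy's table-indexing machinery from sorted table
# columns; the literal keys and row numbers below are made-up values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bst = BST(data=[(1,), (2,), (2,), (3,)], row_index=[10, 11, 12, 13])
    print(bst.find((2,)))       # rows stored under key (2,) -> [11, 12]
    print(bst.sorted_data())    # row numbers ordered by key -> [10, 11, 12, 13]
    print(bst.height)           # tree height for this insertion order -> 2
    bst.remove((2,), data=11)   # drop a single row from the (2,) node
    print(bst.find((2,)))       # -> [12]
    print(bst.is_valid())       # still a valid BST -> True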
# Licensed under a 3-clause BSD style license - see LICENSE.rst import fnmatch import os import re import sys import numpy as np from astropy import log from astropy.utils.console import Getch, color_print, conf, terminal_size from astropy.utils.data_info import dtype_info_name __all__ = [] def default_format_func(format_, val): if isinstance(val, bytes): return val.decode("utf-8", errors="replace") else: return str(val) # The first three functions are helpers for _auto_format_func def _use_str_for_masked_values(format_func): """Wrap format function to trap masked values. String format functions and most user functions will not be able to deal with masked values, so we wrap them to ensure they are passed to str(). """ return lambda format_, val: ( str(val) if val is np.ma.masked else format_func(format_, val) ) def _possible_string_format_functions(format_): """Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. """ yield lambda format_, val: format(val, format_) yield lambda format_, val: format_.format(val) yield lambda format_, val: format_ % val yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names}) def get_auto_format_func( col=None, possible_string_format_functions=_possible_string_format_functions ): """ Return a wrapped ``auto_format_func`` function which is used in formatting table columns. This is primarily an internal function but gets used directly in other parts of astropy, e.g. `astropy.io.ascii`. Parameters ---------- col_name : object, optional Hashable object to identify column like id or name. Default is None. possible_string_format_functions : func, optional Function that yields possible string formatting functions (defaults to internal function to do this). Returns ------- Wrapped ``auto_format_func`` function """ def _auto_format_func(format_, val): """Format ``val`` according to ``format_`` for a plain format specifier, old- or new-style format strings, or using a user supplied function. More importantly, determine and cache (in _format_funcs) a function that will do this subsequently. In this way this complicated logic is only done for the first value. Returns the formatted value. """ if format_ is None: return default_format_func(format_, val) if format_ in col.info._format_funcs: return col.info._format_funcs[format_](format_, val) if callable(format_): format_func = lambda format_, val: format_(val) try: out = format_func(format_, val) if not isinstance(out, str): raise ValueError( "Format function for value {} returned {} " "instead of string type".format(val, type(val)) ) except Exception as err: # For a masked element, the format function call likely failed # to handle it. Just return the string representation for now, # and retry when a non-masked value comes along. if val is np.ma.masked: return str(val) raise ValueError(f"Format function for value {val} failed.") from err # If the user-supplied function handles formatting masked elements, use # it directly. Otherwise, wrap it in a function that traps them. try: format_func(format_, np.ma.masked) except Exception: format_func = _use_str_for_masked_values(format_func) else: # For a masked element, we cannot set string-based format functions yet, # as all tests below will fail. Just return the string representation # of masked for now, and retry when a non-masked value comes along. 
if val is np.ma.masked: return str(val) for format_func in possible_string_format_functions(format_): try: # Does this string format method work? out = format_func(format_, val) # Require that the format statement actually did something. if out == format_: raise ValueError("the format passed in did nothing.") except Exception: continue else: break else: # None of the possible string functions passed muster. raise ValueError( f"unable to parse format string {format_} for its column." ) # String-based format functions will fail on masked elements; # wrap them in a function that traps them. format_func = _use_str_for_masked_values(format_func) col.info._format_funcs[format_] = format_func return out return _auto_format_func def _get_pprint_include_names(table): """Get the set of names to show in pprint from the table pprint_include_names and pprint_exclude_names attributes. These may be fnmatch unix-style globs. """ def get_matches(name_globs, default): match_names = set() if name_globs: # For None or () use the default for name in table.colnames: for name_glob in name_globs: if fnmatch.fnmatch(name, name_glob): match_names.add(name) break else: match_names.update(default) return match_names include_names = get_matches(table.pprint_include_names(), table.colnames) exclude_names = get_matches(table.pprint_exclude_names(), []) return include_names - exclude_names class TableFormatter: @staticmethod def _get_pprint_size(max_lines=None, max_width=None): """Get the output size (number of lines and character width) for Column and Table pformat/pprint methods. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default will be determined using the ``astropy.table.conf.max_lines`` configuration item. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for max_width except the configuration item is ``astropy.table.conf.max_width``. Parameters ---------- max_lines : int or None Maximum lines of output (header + data rows) max_width : int or None Maximum width (characters) output Returns ------- max_lines, max_width : int """ # Declare to keep static type checker happy. lines = None width = None if max_lines is None: max_lines = conf.max_lines if max_width is None: max_width = conf.max_width if max_lines is None or max_width is None: lines, width = terminal_size() if max_lines is None: max_lines = lines elif max_lines < 0: max_lines = sys.maxsize if max_lines < 8: max_lines = 8 if max_width is None: max_width = width elif max_width < 0: max_width = sys.maxsize if max_width < 10: max_width = 10 return max_lines, max_width def _pformat_col( self, col, max_lines=None, show_name=True, show_unit=None, show_dtype=False, show_length=None, html=False, align=None, ): """Return a list of formatted string representation of column values. Parameters ---------- max_lines : int Maximum lines of output (header + data rows) show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include column dtype. Default is False. show_length : bool Include column length at end. Default is to show this only if the column is not shown completely. html : bool Output column as HTML align : str Left/right alignment of columns. Default is '>' (right) for all columns. 
Other allowed values are '<', '^', and '0=' for left, centered, and 0-padded, respectively. Returns ------- lines : list List of lines with formatted column values outs : dict Dict which is used to pass back additional values defined within the iterator. """ if show_unit is None: show_unit = col.info.unit is not None outs = {} # Some values from _pformat_col_iter iterator that are needed here col_strs_iter = self._pformat_col_iter( col, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, show_length=show_length, outs=outs, ) # Replace tab and newline with text representations so they display nicely. # Newline in particular is a problem in a multicolumn table. col_strs = [ val.replace("\t", "\\t").replace("\n", "\\n") for val in col_strs_iter ] if len(col_strs) > 0: col_width = max(len(x) for x in col_strs) if html: from astropy.utils.xml.writer import xml_escape n_header = outs["n_header"] for i, col_str in enumerate(col_strs): # _pformat_col output has a header line '----' which is not needed here if i == n_header - 1: continue td = "th" if i < n_header else "td" val = f"<{td}>{xml_escape(col_str.strip())}</{td}>" row = "<tr>" + val + "</tr>" if i < n_header: row = "<thead>" + row + "</thead>" col_strs[i] = row if n_header > 0: # Get rid of '---' header line col_strs.pop(n_header - 1) col_strs.insert(0, "<table>") col_strs.append("</table>") # Now bring all the column string values to the same fixed width else: col_width = max(len(x) for x in col_strs) if col_strs else 1 # Center line header content and generate dashed headerline for i in outs["i_centers"]: col_strs[i] = col_strs[i].center(col_width) if outs["i_dashes"] is not None: col_strs[outs["i_dashes"]] = "-" * col_width # Format columns according to alignment. `align` arg has precedent, otherwise # use `col.format` if it starts as a legal alignment string. If neither applies # then right justify. re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])") match = None if align: # If there is an align specified then it must match match = re_fill_align.match(align) if not match: raise ValueError( "column align must be one of '<', '^', '>', or '='" ) elif isinstance(col.info.format, str): # col.info.format need not match, in which case rjust gets used match = re_fill_align.match(col.info.format) if match: fill_char = match.group("fill") align_char = match.group("align") if align_char == "=": if fill_char != "0": raise ValueError("fill character must be '0' for '=' align") # str.zfill gets used which does not take fill char arg fill_char = "" else: fill_char = "" align_char = ">" justify_methods = {"<": "ljust", "^": "center", ">": "rjust", "=": "zfill"} justify_method = justify_methods[align_char] justify_args = (col_width, fill_char) if fill_char else (col_width,) for i, col_str in enumerate(col_strs): col_strs[i] = getattr(col_str, justify_method)(*justify_args) if outs["show_length"]: col_strs.append(f"Length = {len(col)} rows") return col_strs, outs def _name_and_structure(self, name, dtype, sep=" "): """Format a column name, including a possible structure. Normally, just returns the name, but if it has a structured dtype, will add the parts in between square brackets. E.g., "name [f0, f1]" or "name [f0[sf0, sf1], f1]". 
""" if dtype is None or dtype.names is None: return name structure = ", ".join( [ self._name_and_structure(name, dt, sep="") for name, (dt, _) in dtype.fields.items() ] ) return f"{name}{sep}[{structure}]" def _pformat_col_iter( self, col, max_lines, show_name, show_unit, outs, show_dtype=False, show_length=None, ): """Iterator which yields formatted string representation of column values. Parameters ---------- max_lines : int Maximum lines of output (header + data rows) show_name : bool Include column name. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. outs : dict Must be a dict which is used to pass back additional values defined within the iterator. show_dtype : bool Include column dtype. Default is False. show_length : bool Include column length at end. Default is to show this only if the column is not shown completely. """ max_lines, _ = self._get_pprint_size(max_lines, -1) dtype = getattr(col, "dtype", None) multidims = getattr(col, "shape", [0])[1:] if multidims: multidim0 = tuple(0 for n in multidims) multidim1 = tuple(n - 1 for n in multidims) multidims_all_ones = np.prod(multidims) == 1 multidims_has_zero = 0 in multidims i_dashes = None i_centers = [] # Line indexes where content should be centered n_header = 0 if show_name: i_centers.append(n_header) # Get column name (or 'None' if not set) col_name = str(col.info.name) n_header += 1 yield self._name_and_structure(col_name, dtype) if show_unit: i_centers.append(n_header) n_header += 1 yield str(col.info.unit or "") if show_dtype: i_centers.append(n_header) n_header += 1 if dtype is not None: col_dtype = dtype_info_name((dtype, multidims)) else: col_dtype = col.__class__.__qualname__ or "object" yield col_dtype if show_unit or show_name or show_dtype: i_dashes = n_header n_header += 1 yield "---" max_lines -= n_header n_print2 = max_lines // 2 n_rows = len(col) # This block of code is responsible for producing the function that # will format values for this column. The ``format_func`` function # takes two args (col_format, val) and returns the string-formatted # version. Some points to understand: # # - col_format could itself be the formatting function, so it will # actually end up being called with itself as the first arg. In # this case the function is expected to ignore its first arg. # # - auto_format_func is a function that gets called on the first # column value that is being formatted. It then determines an # appropriate formatting function given the actual value to be # formatted. This might be deterministic or it might involve # try/except. The latter allows for different string formatting # options like %f or {:5.3f}. When auto_format_func is called it: # 1. Caches the function in the _format_funcs dict so for subsequent # values the right function is called right away. # 2. Returns the formatted value. # # - possible_string_format_functions is a function that yields a # succession of functions that might successfully format the # value. There is a default, but Mixin methods can override this. # See Quantity for an example. # # - get_auto_format_func() returns a wrapped version of auto_format_func # with the column id and possible_string_format_functions as # enclosed variables. 
col_format = col.info.format or getattr(col.info, "default_format", None) pssf = ( getattr(col.info, "possible_string_format_functions", None) or _possible_string_format_functions ) auto_format_func = get_auto_format_func(col, pssf) format_func = col.info._format_funcs.get(col_format, auto_format_func) if len(col) > max_lines: if show_length is None: show_length = True i0 = n_print2 - (1 if show_length else 0) i1 = n_rows - n_print2 - max_lines % 2 indices = np.concatenate( [np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))] ) else: i0 = -1 indices = np.arange(len(col)) def format_col_str(idx): if multidims: # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a') # with shape (n,1,...,1) from being printed as if there was # more than one element in a row if multidims_all_ones: return format_func(col_format, col[(idx,) + multidim0]) elif multidims_has_zero: # Any zero dimension means there is no data to print return "" else: left = format_func(col_format, col[(idx,) + multidim0]) right = format_func(col_format, col[(idx,) + multidim1]) return f"{left} .. {right}" else: return format_func(col_format, col[idx]) # Add formatted values if within bounds allowed by max_lines for idx in indices: if idx == i0: yield "..." else: try: yield format_col_str(idx) except ValueError: raise ValueError( 'Unable to parse format string "{}" for entry "{}" ' 'in column "{}"'.format(col_format, col[idx], col.info.name) ) outs["show_length"] = show_length outs["n_header"] = n_header outs["i_centers"] = i_centers outs["i_dashes"] = i_dashes def _pformat_table( self, table, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, tableclass=None, align=None, ): """Return a list of lines for the formatted string representation of the table. Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is to False. html : bool Format the output as an HTML table. Default is False. tableid : str or None An ID tag for the table; only used if html is set. Default is "table{id}", where id is the unique integer id of the table object, id(table) tableclass : str or list of str or None CSS classes for the table; only used if html is set. Default is none align : str or list or tuple Left/right alignment of columns. Default is '>' (right) for all columns. Other allowed values are '<', '^', and '0=' for left, centered, and 0-padded, respectively. A list of strings can be provided for alignment of tables with multiple columns. Returns ------- rows : list Formatted table as a list of strings outs : dict Dict which is used to pass back additional values defined within the iterator. 
""" # "Print" all the values into temporary lists by column for subsequent # use and to determine the width max_lines, max_width = self._get_pprint_size(max_lines, max_width) if show_unit is None: show_unit = any(col.info.unit for col in table.columns.values()) # Coerce align into a correctly-sized list of alignments (if possible) n_cols = len(table.columns) if align is None or isinstance(align, str): align = [align] * n_cols elif isinstance(align, (list, tuple)): if len(align) != n_cols: raise ValueError( "got {} alignment values instead of " "the number of columns ({})".format(len(align), n_cols) ) else: raise TypeError( "align keyword must be str or list or tuple (got {})".format( type(align) ) ) # Process column visibility from table pprint_include_names and # pprint_exclude_names attributes and get the set of columns to show. pprint_include_names = _get_pprint_include_names(table) cols = [] outs = None # Initialize so static type checker is happy for align_, col in zip(align, table.columns.values()): if col.info.name not in pprint_include_names: continue lines, outs = self._pformat_col( col, max_lines, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, align=align_, ) if outs["show_length"]: lines = lines[:-1] cols.append(lines) if not cols: return ["<No columns>"], {"show_length": False} # Use the values for the last column since they are all the same n_header = outs["n_header"] n_rows = len(cols[0]) def outwidth(cols): return sum(len(c[0]) for c in cols) + len(cols) - 1 dots_col = ["..."] * n_rows middle = len(cols) // 2 while outwidth(cols) > max_width: if len(cols) == 1: break if len(cols) == 2: cols[1] = dots_col break if cols[middle] is dots_col: cols.pop(middle) middle = len(cols) // 2 cols[middle] = dots_col # Now "print" the (already-stringified) column values into a # row-oriented list. rows = [] if html: from astropy.utils.xml.writer import xml_escape if tableid is None: tableid = f"table{id(table)}" if tableclass is not None: if isinstance(tableclass, list): tableclass = " ".join(tableclass) rows.append(f'<table id="{tableid}" class="{tableclass}">') else: rows.append(f'<table id="{tableid}">') for i in range(n_rows): # _pformat_col output has a header line '----' which is not needed here if i == n_header - 1: continue td = "th" if i < n_header else "td" vals = (f"<{td}>{xml_escape(col[i].strip())}</{td}>" for col in cols) row = "<tr>" + "".join(vals) + "</tr>" if i < n_header: row = "<thead>" + row + "</thead>" rows.append(row) rows.append("</table>") else: for i in range(n_rows): row = " ".join(col[i] for col in cols) rows.append(row) return rows, outs def _more_tabcol( self, tabcol, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, ): """Interactive "more" of a table or column. Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. """ allowed_keys = "f br<>qhpn" # Count the header lines n_header = 0 if show_name: n_header += 1 if show_unit: n_header += 1 if show_dtype: n_header += 1 if show_name or show_unit or show_dtype: n_header += 1 # Set up kwargs for pformat call. Only Table gets max_width. 
kwargs = dict( max_lines=-1, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, ) if hasattr(tabcol, "columns"): # tabcol is a table kwargs["max_width"] = max_width # If max_lines is None (=> query screen size) then increase by 2. # This is because get_pprint_size leaves 6 extra lines so that in # ipython you normally see the last input line. max_lines1, max_width = self._get_pprint_size(max_lines, max_width) if max_lines is None: max_lines1 += 2 delta_lines = max_lines1 - n_header # Set up a function to get a single character on any platform inkey = Getch() i0 = 0 # First table/column row to show showlines = True while True: i1 = i0 + delta_lines # Last table/col row to show if showlines: # Don't always show the table (e.g. after help) try: os.system("cls" if os.name == "nt" else "clear") except Exception: pass # No worries if clear screen call fails lines = tabcol[i0:i1].pformat(**kwargs) colors = ( "red" if i < n_header else "default" for i in range(len(lines)) ) for color, line in zip(colors, lines): color_print(line, color) showlines = True print() print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=" ") # Get a valid key while True: try: key = inkey().lower() except Exception: print("\n") log.error( "Console does not support getting a character" " as required by more(). Use pprint() instead." ) return if key in allowed_keys: break print(key) if key.lower() == "q": break elif key == " " or key == "f": i0 += delta_lines elif key == "b": i0 = i0 - delta_lines elif key == "r": pass elif key == "<": i0 = 0 elif key == ">": i0 = len(tabcol) elif key == "p": i0 -= 1 elif key == "n": i0 += 1 elif key == "h": showlines = False print( """ Browsing keys: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help""", end=" ", ) if i0 < 0: i0 = 0 if i0 >= len(tabcol) - delta_lines: i0 = len(tabcol) - delta_lines print("\n")
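

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the astropy source above): it shows
# how the TableFormatter machinery in this module is reached through the
# public Table.pformat()/Table.pprint() API -- per-column format strings,
# alignment, and the max_lines/max_width size limits. The table and column
# names ('name', 'flux') and the format string are arbitrary example values.
if __name__ == "__main__":
    from astropy.table import Table

    t = Table({"name": ["a", "b", "c"], "flux": [1.23456, 20.5, 0.007]})

    # Per-column format string; _pformat_col_iter resolves it to a formatting
    # function via get_auto_format_func() on first use and then caches it.
    t["flux"].info.format = "{:8.3f}"

    # Explicit per-column alignment: left-justify 'name', right-justify 'flux'.
    for line in t.pformat(align=["<", ">"]):
        print(line)

    # Cap the rendered size; for long tables the middle rows are replaced by a
    # '...' row and, if the output is too wide, middle columns by a '...' column.
    t.pprint(max_lines=8, max_width=40)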
""" High-level table operations: - join() - setdiff() - hstack() - vstack() - dstack() """ # Licensed under a 3-clause BSD style license - see LICENSE.rst import collections import itertools from collections import Counter, OrderedDict from collections.abc import Mapping, Sequence from copy import deepcopy import numpy as np from astropy.units import Quantity from astropy.utils import metadata from astropy.utils.masked import Masked from . import _np_utils from .np_utils import TableMergeError from .table import Column, MaskedColumn, QTable, Row, Table __all__ = [ "join", "setdiff", "hstack", "vstack", "unique", "join_skycoord", "join_distance", ] __doctest_requires__ = {"join_skycoord": ["scipy"], "join_distance": ["scipy"]} def _merge_table_meta(out, tables, metadata_conflicts="warn"): out_meta = deepcopy(tables[0].meta) for table in tables[1:]: out_meta = metadata.merge( out_meta, table.meta, metadata_conflicts=metadata_conflicts ) out.meta.update(out_meta) def _get_list_of_tables(tables): """ Check that tables is a Table or sequence of Tables. Returns the corresponding list of Tables. """ # Make sure we have a list of things if not isinstance(tables, Sequence): tables = [tables] # Make sure there is something to stack if len(tables) == 0: raise ValueError("no values provided to stack.") # Convert inputs (Table, Row, or anything column-like) to Tables. # Special case that Quantity converts to a QTable. for ii, val in enumerate(tables): if isinstance(val, Table): pass elif isinstance(val, Row): tables[ii] = Table(val) elif isinstance(val, Quantity): tables[ii] = QTable([val]) else: try: tables[ii] = Table([val]) except (ValueError, TypeError) as err: raise TypeError(f"Cannot convert {val} to table column.") from err return tables def _get_out_class(objs): """ From a list of input objects ``objs`` get merged output object class. This is just taken as the deepest subclass. This doesn't handle complicated inheritance schemes, but as a special case, classes which share ``info`` are taken to be compatible. """ out_class = objs[0].__class__ for obj in objs[1:]: if issubclass(obj.__class__, out_class): out_class = obj.__class__ if any( not ( issubclass(out_class, obj.__class__) or out_class.info is obj.__class__.info ) for obj in objs ): raise ValueError( "unmergeable object classes {}".format( [obj.__class__.__name__ for obj in objs] ) ) return out_class def join_skycoord(distance, distance_func="search_around_sky"): """Helper function to join on SkyCoord columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are both ``SkyCoord`` objects, matched by computing the distance between points and accepting values below ``distance``. The distance cross-matching is done using either `~astropy.coordinates.search_around_sky` or `~astropy.coordinates.search_around_3d`, depending on the value of ``distance_func``. The default is ``'search_around_sky'``. One can also provide a function object for ``distance_func``, in which case it must be a function that follows the same input and output API as `~astropy.coordinates.search_around_sky`. In this case the function will be called with ``(skycoord1, skycoord2, distance)`` as arguments. Parameters ---------- distance : `~astropy.units.Quantity` ['angle', 'length'] Maximum distance between points to be considered a join match. Must have angular or distance units. distance_func : str or function Specifies the function for performing the cross-match based on ``distance``. 
If supplied as a string this specifies the name of a function in `astropy.coordinates`. If supplied as a function then that function is called directly. Returns ------- join_func : function Function that accepts two ``SkyCoord`` columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- This example shows an inner join of two ``SkyCoord`` columns, taking any sources within 0.2 deg to be a match. Note the new ``sc_id`` column which is added and provides a unique source identifier for the matches. >>> from astropy.coordinates import SkyCoord >>> import astropy.units as u >>> from astropy.table import Table, join_skycoord >>> from astropy import table >>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') >>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') >>> join_func = join_skycoord(0.2 * u.deg) >>> join_func(sc1, sc2) # Associate each coordinate with unique source ID (array([3, 1, 1, 2]), array([4, 1, 2])) >>> t1 = Table([sc1], names=['sc']) >>> t2 = Table([sc2], names=['sc']) >>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) >>> print(t12) # Note new `sc_id` column with the IDs from join_func() sc_id sc_1 sc_2 deg,deg deg,deg ----- ------- -------- 1 1.0,0.0 1.05,0.0 1 1.1,0.0 1.05,0.0 2 2.0,0.0 2.1,0.0 """ if isinstance(distance_func, str): import astropy.coordinates as coords try: distance_func = getattr(coords, distance_func) except AttributeError as err: raise ValueError( "distance_func must be a function in astropy.coordinates" ) from err else: from inspect import isfunction if not isfunction(distance_func): raise ValueError("distance_func must be a str or function") def join_func(sc1, sc2): # Call the appropriate SkyCoord method to find pairs within distance idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance) # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(sc1), dtype=int) ids2 = np.zeros(len(sc2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idx2 in zip(idxs1, idxs2): # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join_distance(distance, kdtree_args=None, query_args=None): """Helper function to join table columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are matched by computing the distance between points and accepting values below ``distance``. This numerical "fuzzy" match can apply to 1-D or 2-D columns, where in the latter case the distance is a vector distance. The distance cross-matching is done using `scipy.spatial.cKDTree`. If necessary you can tweak the default behavior by providing ``dict`` values for the ``kdtree_args`` or ``query_args``. 
Parameters ---------- distance : float or `~astropy.units.Quantity` ['length'] Maximum distance between points to be considered a join match kdtree_args : dict, None Optional extra args for `~scipy.spatial.cKDTree` query_args : dict, None Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree` Returns ------- join_func : function Function that accepts (skycoord1, skycoord2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- >>> from astropy.table import Table, join_distance >>> from astropy import table >>> c1 = [0, 1, 1.1, 2] >>> c2 = [0.5, 1.05, 2.1] >>> t1 = Table([c1], names=['col']) >>> t2 = Table([c2], names=['col']) >>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)}) >>> print(t12) col_id col_1 col_2 ------ ----- ----- 1 1.0 1.05 1 1.1 1.05 2 2.0 2.1 3 0.0 -- 4 -- 0.5 """ try: from scipy.spatial import cKDTree except ImportError as exc: raise ImportError("scipy is required to use join_distance()") from exc if kdtree_args is None: kdtree_args = {} if query_args is None: query_args = {} def join_func(col1, col2): if col1.ndim > 2 or col2.ndim > 2: raise ValueError("columns for isclose_join must be 1- or 2-dimensional") if isinstance(distance, Quantity): # Convert to np.array with common unit col1 = col1.to_value(distance.unit) col2 = col2.to_value(distance.unit) dist = distance.value else: # Convert to np.array to allow later in-place shape changing col1 = np.asarray(col1) col2 = np.asarray(col2) dist = distance # Ensure columns are pure np.array and are 2-D for use with KDTree if col1.ndim == 1: col1.shape = col1.shape + (1,) if col2.ndim == 1: col2.shape = col2.shape + (1,) # Cross-match col1 and col2 within dist using KDTree kd1 = cKDTree(col1, **kdtree_args) kd2 = cKDTree(col2, **kdtree_args) nears = kd1.query_ball_tree(kd2, r=dist, **query_args) # Output of above is nears which is a list of lists, where the outer # list corresponds to each item in col1, and where the inner lists are # indexes into col2 of elements within the distance tolerance. This # identifies col1 / col2 near pairs. # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(col1), dtype=int) ids2 = np.zeros(len(col2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idxs2 in enumerate(nears): for idx2 in idxs2: # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join( left, right, keys=None, join_type="inner", *, keys_left=None, keys_right=None, uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], metadata_conflicts="warn", join_funcs=None, ): """ Perform a join of the left table with the right table on specified keys. Parameters ---------- left : `~astropy.table.Table`-like object Left side table in the join. 
If not a Table, will call ``Table(left)`` right : `~astropy.table.Table`-like object Right side table in the join. If not a Table, will call ``Table(right)`` keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' keys_left : str or list of str or list of column-like, optional Left column(s) used to match rows instead of ``keys`` arg. This can be be a single left table column name or list of column names, or a list of column-like values with the same lengths as the left table. keys_right : str or list of str or list of column-like, optional Same as ``keys_left``, but for the right side of the join. uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Try converting inputs to Table as needed if not isinstance(left, Table): left = Table(left) if not isinstance(right, Table): right = Table(right) col_name_map = OrderedDict() out = _join( left, right, keys, join_type, uniq_col_name, table_names, col_name_map, metadata_conflicts, join_funcs, keys_left=keys_left, keys_right=keys_right, ) # Merge the column and table meta data. Table subclasses might override # these methods for custom merge behavior. _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) return out def setdiff(table1, table2, keys=None): """ Take a set difference of table rows. The row set difference will contain all rows in ``table1`` that are not present in ``table2``. If the keys parameter is not defined, all columns in ``table1`` will be included in the output table. Parameters ---------- table1 : `~astropy.table.Table` ``table1`` is on the left side of the set difference. table2 : `~astropy.table.Table` ``table2`` is on the right side of the set difference. keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns in ``table1``. Returns ------- diff_table : `~astropy.table.Table` New table containing the set difference between tables. If the set difference is none, an empty table will be returned. 
Examples -------- To get a set difference between two tables:: >>> from astropy.table import setdiff, Table >>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b')) >>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b')) >>> print(t1) a b --- --- 1 c 4 d 9 f >>> print(t2) a b --- --- 1 c 5 b 9 f >>> print(setdiff(t1, t2)) a b --- --- 4 d >>> print(setdiff(t2, t1)) a b --- --- 5 b """ if keys is None: keys = table1.colnames # Check that all keys are in table1 and table2 for tbl, tbl_str in ((table1, "table1"), (table2, "table2")): diff_keys = np.setdiff1d(keys, tbl.colnames) if len(diff_keys) != 0: raise ValueError( "The {} columns are missing from {}, cannot take " "a set difference.".format(diff_keys, tbl_str) ) # Make a light internal copy of both tables t1 = table1.copy(copy_data=False) t1.meta = {} t1.keep_columns(keys) t1["__index1__"] = np.arange(len(table1)) # Keep track of rows indices # Make a light internal copy to avoid touching table2 t2 = table2.copy(copy_data=False) t2.meta = {} t2.keep_columns(keys) # Dummy column to recover rows after join t2["__index2__"] = np.zeros(len(t2), dtype=np.uint8) # dummy column t12 = _join(t1, t2, join_type="left", keys=keys, metadata_conflicts="silent") # If t12 index2 is masked then that means some rows were in table1 but not table2. if hasattr(t12["__index2__"], "mask"): # Define bool mask of table1 rows not in table2 diff = t12["__index2__"].mask # Get the row indices of table1 for those rows idx = t12["__index1__"][diff] # Select corresponding table1 rows straight from table1 to ensure # correct table and column types. t12_diff = table1[idx] else: t12_diff = table1[[]] return t12_diff def dstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack columns within tables depth-wise A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along depth-wise with the current table Table columns should have same shape and name for depth-wise stacking join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import dstack, Table >>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b')) >>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b')) >>> print(t1) a b --- --- 1.0 3.0 2.0 4.0 >>> print(t2) a b --- --- 5.0 7.0 6.0 8.0 >>> print(dstack([t1, t2])) a b ---------- ---------- 1.0 .. 5.0 3.0 .. 7.0 2.0 .. 6.0 4.0 .. 
8.0 """ _check_join_type(join_type, "dstack") tables = _get_list_of_tables(tables) if len(tables) == 1: return tables[0] # no point in stacking a single table n_rows = {len(table) for table in tables} if len(n_rows) != 1: raise ValueError("Table lengths must all match for dstack") n_row = n_rows.pop() out = vstack(tables, join_type, metadata_conflicts) for name, col in out.columns.items(): col = out[name] # Reshape to so each original column is now in a row. # If entries are not 0-dim then those additional shape dims # are just carried along. # [x x x y y y] => [[x x x], # [y y y]] new_shape = (len(tables), n_row) + col.shape[1:] try: col.shape = (len(tables), n_row) + col.shape[1:] except AttributeError: col = col.reshape(new_shape) # Transpose the table and row axes to get to # [[x, y], # [x, y] # [x, y]] axes = np.arange(len(col.shape)) axes[:2] = [1, 0] # This temporarily makes `out` be corrupted (columns of different # length) but it all works out in the end. out.columns.__setitem__(name, col.transpose(axes), validated=True) return out def vstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack tables vertically (along rows) A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along rows (vertically) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import vstack, Table >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) a b --- --- 5 7 6 8 >>> print(vstack([t1, t2])) a b --- --- 1 3 2 4 5 7 6 8 """ _check_join_type(join_type, "vstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _vstack(tables, join_type, col_name_map, metadata_conflicts) # Merge table metadata _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def hstack( tables, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, metadata_conflicts="warn", ): """ Stack tables along columns (horizontally) A ``join_type`` of 'exact' means that the tables must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with table values being masked where no common values are available. 
Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Tables to stack along columns (horizontally) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. See Also -------- Table.add_columns, Table.replace_column, Table.update Examples -------- To stack two tables horizontally (along columns) do:: >>> from astropy.table import Table, hstack >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) c d --- --- 5 7 6 8 >>> print(hstack([t1, t2])) a b c d --- --- --- --- 1 3 5 7 2 4 6 8 """ _check_join_type(join_type, "hstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _hstack(tables, join_type, uniq_col_name, table_names, col_name_map) _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def unique(input_table, keys=None, silent=False, keep="first"): """ Returns the unique rows of a table. Parameters ---------- input_table : table-like keys : str or list of str Name(s) of column(s) used to create unique rows. Default is to use all columns. keep : {'first', 'last', 'none'} Whether to keep the first or last row for each set of duplicates. If 'none', all rows that are duplicate are removed, leaving only rows that are already unique in the input. Default is 'first'. silent : bool If `True`, masked value column(s) are silently removed from ``keys``. If `False`, an exception is raised when ``keys`` contains masked value column(s). Default is `False`. Returns ------- unique_table : `~astropy.table.Table` object New table containing only the unique rows of ``input_table``. Examples -------- >>> from astropy.table import unique, Table >>> import numpy as np >>> table = Table(data=[[1,2,3,2,3,3], ... [2,3,4,5,4,6], ... [3,4,5,6,7,8]], ... names=['col1', 'col2', 'col3'], ... 
dtype=[np.int32, np.int32, np.int32]) >>> table <Table length=6> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 2 5 6 3 4 7 3 6 8 >>> unique(table, keys='col1') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 >>> unique(table, keys=['col1'], keep='last') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 5 6 3 6 8 >>> unique(table, keys=['col1', 'col2']) <Table length=5> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 4 5 3 6 8 >>> unique(table, keys=['col1', 'col2'], keep='none') <Table length=4> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 6 8 >>> unique(table, keys=['col1'], keep='none') <Table length=1> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 """ if keep not in ("first", "last", "none"): raise ValueError("'keep' should be one of 'first', 'last', 'none'") if isinstance(keys, str): keys = [keys] if keys is None: keys = input_table.colnames else: if len(set(keys)) != len(keys): raise ValueError("duplicate key names") # Check for columns with masked values for key in keys[:]: col = input_table[key] if hasattr(col, "mask") and np.any(col.mask): if not silent: raise ValueError( "cannot use columns with masked values as keys; " "remove column '{}' from keys and rerun " "unique()".format(key) ) del keys[keys.index(key)] if len(keys) == 0: raise ValueError( "no column remained in ``keys``; " "unique() cannot work with masked value " "key columns" ) grouped_table = input_table.group_by(keys) indices = grouped_table.groups.indices if keep == "first": indices = indices[:-1] elif keep == "last": indices = indices[1:] - 1 else: indices = indices[:-1][np.diff(indices) == 1] return grouped_table[indices] def get_col_name_map( arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None ): """ Find the column names mapping when merging the list of tables ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc. """ col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) col_name_list = [] if table_names is None: table_names = [str(ii + 1) for ii in range(len(arrays))] for idx, array in enumerate(arrays): table_name = table_names[idx] for name in array.colnames: out_name = name if name in common_names: # If name is in the list of common_names then insert into # the column name list, but just once. 
if name not in col_name_list: col_name_list.append(name) else: # If name is not one of the common column outputs, and it collides # with the names in one of the other arrays, then rename others = list(arrays) others.pop(idx) if any(name in other.colnames for other in others): out_name = uniq_col_name.format( table_name=table_name, col_name=name ) col_name_list.append(out_name) col_name_map[out_name][idx] = name # Check for duplicate output column names col_name_count = Counter(col_name_list) repeated_names = [name for name, count in col_name_count.items() if count > 1] if repeated_names: raise TableMergeError( "Merging column names resulted in duplicates: {}. " "Change uniq_col_name or table_names args to fix this.".format( repeated_names ) ) # Convert col_name_map to a regular dict with tuple (immutable) values col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) return col_name_map def get_descrs(arrays, col_name_map): """ Find the dtypes descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output. """ out_descrs = [] for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] # List of names of the columns that contribute to this output column. names = [name for name in in_names if name is not None] # Output dtype is the superset of all dtypes in in_arrays try: dtype = common_dtype(in_cols) except TableMergeError as tme: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( "The '{}' columns have incompatible types: {}".format( names[0], tme._incompat_types ) ) from tme # Make sure all input shapes are the same uniq_shapes = {col.shape[1:] for col in in_cols} if len(uniq_shapes) != 1: raise TableMergeError(f"Key columns {names!r} have different shape") shape = uniq_shapes.pop() if out_name is not None: out_name = str(out_name) out_descrs.append((out_name, dtype, shape)) return out_descrs def common_dtype(cols): """ Use numpy to find the common dtype for a list of columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void """ try: return metadata.common_dtype(cols) except metadata.MergeConflictError as err: tme = TableMergeError(f"Columns have incompatible types {err._incompat_types}") tme._incompat_types = err._incompat_types raise tme from err def _get_join_sort_idxs(keys, left, right): # Go through each of the key columns in order and make columns for # a new structured array that represents the lexical ordering of those # key columns. This structured array is then argsort'ed. The trick here # is that some columns (e.g. Time) may need to be expanded into multiple # columns for ordering here. ii = 0 # Index for uniquely naming the sort columns # sortable_table dtypes as list of (name, dtype_str, shape) tuples sort_keys_dtypes = [] sort_keys = [] # sortable_table (structured ndarray) column names sort_left = {} # sortable ndarrays from left table sort_right = {} # sortable ndarray from right table for key in keys: # get_sortable_arrays() returns a list of ndarrays that can be lexically # sorted to represent the order of the column. In most cases this is just # a single element of the column itself. 
left_sort_cols = left[key].info.get_sortable_arrays() right_sort_cols = right[key].info.get_sortable_arrays() if len(left_sort_cols) != len(right_sort_cols): # Should never happen because cols are screened beforehand for compatibility raise RuntimeError("mismatch in sort cols lengths") for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols): # Check for consistency of shapes. Mismatch should never happen. shape = left_sort_col.shape[1:] if shape != right_sort_col.shape[1:]: raise RuntimeError("mismatch in shape of left vs. right sort array") if shape != (): raise ValueError(f"sort key column {key!r} must be 1-d") sort_key = str(ii) sort_keys.append(sort_key) sort_left[sort_key] = left_sort_col sort_right[sort_key] = right_sort_col # Build up dtypes for the structured array that gets sorted. dtype_str = common_dtype([left_sort_col, right_sort_col]) sort_keys_dtypes.append((sort_key, dtype_str)) ii += 1 # Make the empty sortable table and fill it len_left = len(left) sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes) for key in sort_keys: sortable_table[key][:len_left] = sort_left[key] sortable_table[key][len_left:] = sort_right[key] # Finally do the (lexical) argsort and make a new sorted version idx_sort = sortable_table.argsort(order=sort_keys) sorted_table = sortable_table[idx_sort] # Get indexes of unique elements (i.e. the group boundaries) diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True])) idxs = np.flatnonzero(diffs) return idxs, idx_sort def _apply_join_funcs(left, right, keys, join_funcs): """Apply join_funcs""" # Make light copies of left and right, then add new index columns. left = left.copy(copy_data=False) right = right.copy(copy_data=False) for key, join_func in join_funcs.items(): ids1, ids2 = join_func(left[key], right[key]) # Define a unique id_key name, and keep adding underscores until we have # a name not yet present. id_key = key + "_id" while id_key in left.columns or id_key in right.columns: id_key = id_key[:-2] + "_id" keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys) left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1 right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2 return left, right, keys def _join( left, right, keys=None, join_type="inner", uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], col_name_map=None, metadata_conflicts="warn", join_funcs=None, keys_left=None, keys_right=None, ): """ Perform a join of the left and right Tables on specified keys. Parameters ---------- left : Table Left side table in the join right : Table Right side table in the join keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. metadata_conflicts : str How to proceed with metadata conflicts. 
This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map # Special column name for cartesian join, should never collide with real column cartesian_index_name = "__table_cartesian_join_temp_index__" if join_type not in ("inner", "outer", "left", "right", "cartesian"): raise ValueError( "The 'join_type' argument should be in 'inner', " "'outer', 'left', 'right', or 'cartesian' " "(got '{}' instead)".format(join_type) ) if join_type == "cartesian": if keys: raise ValueError("cannot supply keys for a cartesian join") if join_funcs: raise ValueError("cannot supply join_funcs for a cartesian join") # Make light copies of left and right, then add temporary index columns # with all the same value so later an outer join turns into a cartesian join. left = left.copy(copy_data=False) right = right.copy(copy_data=False) left[cartesian_index_name] = np.uint8(0) right[cartesian_index_name] = np.uint8(0) keys = (cartesian_index_name,) # Handle the case of join key columns that are different between left and # right via keys_left/keys_right args. This is done by saving the original # input tables and making new left and right tables that contain only the # key cols but with common column names ['0', '1', etc]. This sets `keys` to # those fake key names in the left and right tables if keys_left is not None or keys_right is not None: left_orig = left right_orig = right left, right, keys = _join_keys_left_right( left, right, keys, keys_left, keys_right, join_funcs ) if keys is None: keys = tuple(name for name in left.colnames if name in right.colnames) if len(keys) == 0: raise TableMergeError("No keys in common between left and right tables") elif isinstance(keys, str): # If we have a single key, put it in a tuple keys = (keys,) # Check the key columns for arr, arr_label in ((left, "Left"), (right, "Right")): for name in keys: if name not in arr.colnames: raise TableMergeError( f"{arr_label} table does not have key column {name!r}" ) if hasattr(arr[name], "mask") and np.any(arr[name].mask): raise TableMergeError( f"{arr_label} key column {name!r} has missing values" ) if join_funcs is not None: if not all(key in keys for key in join_funcs): raise ValueError( f"join_funcs keys {join_funcs.keys()} must be a " f"subset of join keys {keys}" ) left, right, keys = _apply_join_funcs(left, right, keys, join_funcs) len_left, len_right = len(left), len(right) if len_left == 0 or len_right == 0: raise ValueError("input tables for join must both have at least one row") try: idxs, idx_sort = _get_join_sort_idxs(keys, left, right) except NotImplementedError: raise TypeError("one or more key columns are not sortable") # Now that we have idxs and idx_sort, revert to the original table args to # carry on with making the output joined table. `keys` is set to to an empty # list so that all original left and right columns are included in the # output table. 
if keys_left is not None or keys_right is not None: keys = [] left = left_orig right = right_orig # Joined array dtype as a list of descr (name, type_str, shape) tuples col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names) out_descrs = get_descrs([left, right], col_name_map) # Main inner loop in Cython to compute the cartesian product # indices for the given join type int_join_type = {"inner": 0, "outer": 1, "left": 2, "right": 3, "cartesian": 1}[ join_type ] masked, n_out, left_out, left_mask, right_out, right_mask = _np_utils.join_inner( idxs, idx_sort, len_left, int_join_type ) out = _get_out_class([left, right])() for out_name, dtype, shape in out_descrs: if out_name == cartesian_index_name: continue left_name, right_name = col_name_map[out_name] if left_name and right_name: # this is a key which comes from left and right cols = [left[left_name], right[right_name]] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( "join unavailable for mixin column type(s): {}".format( col_cls.__name__ ) ) out[out_name] = col_cls.info.new_like( cols, n_out, metadata_conflicts, out_name ) out[out_name][:] = np.where( right_mask, left[left_name].take(left_out), right[right_name].take(right_out), ) continue elif left_name: # out_name came from the left table name, array, array_out, array_mask = left_name, left, left_out, left_mask elif right_name: name, array, array_out, array_mask = ( right_name, right, right_out, right_mask, ) else: raise TableMergeError('Unexpected column names (maybe one is ""?)') # Select the correct elements from the original table col = array[name][array_out] # If the output column is masked then set the output column masking # accordingly. Check for columns that don't support a mask attribute. if masked and np.any(array_mask): # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) # array_mask is 1-d corresponding to length of output column. We need # make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..). # Mixin columns might not have ndim attribute so use len(col.shape). array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1) # Now broadcast to the correct final shape array_mask = np.broadcast_to(array_mask, col.shape) try: col[array_mask] = col.info.mask_val except Exception as err: # Not clear how different classes will fail here raise NotImplementedError( "join requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err # Set the output table column to the new joined column out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs): """Do processing to handle keys_left / keys_right args for join. This takes the keys_left/right inputs and turns them into a list of left/right columns corresponding to those inputs (which can be column names or column data values). It also generates the list of fake key column names (strings of "1", "2", etc.) that correspond to the input keys. 
""" def _keys_to_cols(keys, table, label): # Process input `keys`, which is a str or list of str column names in # `table` or a list of column-like objects. The `label` is just for # error reporting. if isinstance(keys, str): keys = [keys] cols = [] for key in keys: if isinstance(key, str): try: cols.append(table[key]) except KeyError: raise ValueError(f"{label} table does not have key column {key!r}") else: if len(key) != len(table): raise ValueError( f"{label} table has different length from key {key}" ) cols.append(key) return cols if join_funcs is not None: raise ValueError("cannot supply join_funcs arg and keys_left / keys_right") if keys_left is None or keys_right is None: raise ValueError("keys_left and keys_right must both be provided") if keys is not None: raise ValueError( "keys arg must be None if keys_left and keys_right are supplied" ) cols_left = _keys_to_cols(keys_left, left, "left") cols_right = _keys_to_cols(keys_right, right, "right") if len(cols_left) != len(cols_right): raise ValueError("keys_left and keys_right args must have same length") # Make two new temp tables for the join with only the join columns and # key columns in common. keys = [f"{ii}" for ii in range(len(cols_left))] left = left.__class__(cols_left, names=keys, copy=False) right = right.__class__(cols_right, names=keys, copy=False) return left, right, keys def _check_join_type(join_type, func_name): """Check join_type arg in hstack and vstack. This specifically checks for the common mistake of call vstack(t1, t2) instead of vstack([t1, t2]). The subsequent check of ``join_type in ('inner', ..)`` does not raise in this case. """ if not isinstance(join_type, str): msg = "`join_type` arg must be a string" if isinstance(join_type, Table): msg += ( ". Did you accidentally " f"call {func_name}(t1, t2, ..) instead of " f"{func_name}([t1, t2], ..)?" ) raise TypeError(msg) if join_type not in ("inner", "exact", "outer"): raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'") def _vstack(arrays, join_type="outer", col_name_map=None, metadata_conflicts="warn"): """ Stack Tables vertically (by rows) A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' means the output will have the union of all columns, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by rows (vertically) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. 
""" # Store user-provided col_name_map until the end _col_name_map = col_name_map # Trivial case of one input array if len(arrays) == 1: return arrays[0] # Start by assuming an outer match where all names go to output names = set(itertools.chain(*[arr.colnames for arr in arrays])) col_name_map = get_col_name_map(arrays, names) # If require_match is True then the output must have exactly the same # number of columns as each input array if join_type == "exact": for names in col_name_map.values(): if any(x is None for x in names): raise TableMergeError( "Inconsistent columns in input arrays " "(use 'inner' or 'outer' join_type to " "allow non-matching columns)" ) join_type = "outer" # For an inner join, keep only columns where all input arrays have that column if join_type == "inner": col_name_map = OrderedDict( (name, in_names) for name, in_names in col_name_map.items() if all(x is not None for x in in_names) ) if len(col_name_map) == 0: raise TableMergeError("Input arrays have no columns in common") lens = [len(arr) for arr in arrays] n_rows = sum(lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( "vstack unavailable for mixin column type(s): {}".format( col_cls.__name__ ) ) try: col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) except metadata.MergeConflictError as err: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( "The '{}' columns have incompatible types: {}".format( out_name, err._incompat_types ) ) from err idx0 = 0 for name, array in zip(in_names, arrays): idx1 = idx0 + len(array) if name in array.colnames: col[idx0:idx1] = array[name] else: # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[idx0:idx1] = col.info.mask_val except Exception as err: raise NotImplementedError( "vstack requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err idx0 = idx1 out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _hstack( arrays, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, col_name_map=None, ): """ Stack tables horizontally (by columns) A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' means the output will have the union of all rows, with array values being masked where no common values are available. Parameters ---------- arrays : List of tables Tables to stack by columns (horizontally) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. 
table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map if table_names is None: table_names = [f"{ii + 1}" for ii in range(len(arrays))] if len(arrays) != len(table_names): raise ValueError("Number of arrays must match number of table_names") # Trivial case of one input arrays if len(arrays) == 1: return arrays[0] col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names) # If require_match is True then all input arrays must have the same length arr_lens = [len(arr) for arr in arrays] if join_type == "exact": if len(set(arr_lens)) > 1: raise TableMergeError( "Inconsistent number of rows in input arrays " "(use 'inner' or 'outer' join_type to allow " "non-matching rows)" ) join_type = "outer" # For an inner join, keep only the common rows if join_type == "inner": min_arr_len = min(arr_lens) if len(set(arr_lens)) > 1: arrays = [arr[:min_arr_len] for arr in arrays] arr_lens = [min_arr_len for arr in arrays] # If there are any output rows where one or more input arrays are missing # then the output must be masked. If any input arrays are masked then # output is masked. n_rows = max(arr_lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): for name, array, arr_len in zip(in_names, arrays, arr_lens): if name is None: continue if n_rows > arr_len: indices = np.arange(n_rows) indices[arr_len:] = 0 col = array[name][indices] # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[arr_len:] = col.info.mask_val except Exception as err: raise NotImplementedError( "hstack requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err else: col = array[name][:n_rows] out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out
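# Minimal usage sketch: the private _vstack/_hstack helpers above are driven
# by the public ``astropy.table.vstack`` / ``hstack`` functions.  The tables
# below are made up for illustration.
if __name__ == "__main__":
    from astropy.table import Table, hstack, vstack

    t1 = Table({"a": [1, 2], "b": [3.0, 4.0]})
    t2 = Table({"a": [5], "c": [6.0]})

    # join_type='outer' keeps the union of column names; values missing from
    # one input are masked in the output.
    stacked = vstack([t1, t2], join_type="outer")

    # hstack pads the shorter table with masked rows for join_type='outer'.
    wide = hstack([t1, t2], join_type="outer")
    print(stacked)
    print(wide)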
""" High-level operations for numpy structured arrays. Some code and inspiration taken from numpy.lib.recfunctions.join_by(). Redistribution license restrictions apply. """ import collections from collections import Counter, OrderedDict from collections.abc import Sequence import numpy as np __all__ = ["TableMergeError"] class TableMergeError(ValueError): pass def get_col_name_map( arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None ): """ Find the column names mapping when merging the list of structured ndarrays ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc. """ col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) col_name_list = [] if table_names is None: table_names = [str(ii + 1) for ii in range(len(arrays))] for idx, array in enumerate(arrays): table_name = table_names[idx] for name in array.dtype.names: out_name = name if name in common_names: # If name is in the list of common_names then insert into # the column name list, but just once. if name not in col_name_list: col_name_list.append(name) else: # If name is not one of the common column outputs, and it collides # with the names in one of the other arrays, then rename others = list(arrays) others.pop(idx) if any(name in other.dtype.names for other in others): out_name = uniq_col_name.format( table_name=table_name, col_name=name ) col_name_list.append(out_name) col_name_map[out_name][idx] = name # Check for duplicate output column names col_name_count = Counter(col_name_list) repeated_names = [name for name, count in col_name_count.items() if count > 1] if repeated_names: raise TableMergeError( "Merging column names resulted in duplicates: {}. " "Change uniq_col_name or table_names args to fix this.".format( repeated_names ) ) # Convert col_name_map to a regular dict with tuple (immutable) values col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) return col_name_map def get_descrs(arrays, col_name_map): """ Find the dtypes descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output. """ out_descrs = [] for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] # List of names of the columns that contribute to this output column. names = [name for name in in_names if name is not None] # Output dtype is the superset of all dtypes in in_arrays try: dtype = common_dtype(in_cols) except TableMergeError as tme: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. 
raise TableMergeError( "The '{}' columns have incompatible types: {}".format( names[0], tme._incompat_types ) ) from tme # Make sure all input shapes are the same uniq_shapes = {col.shape[1:] for col in in_cols} if len(uniq_shapes) != 1: raise TableMergeError("Key columns have different shape") shape = uniq_shapes.pop() if out_name is not None: out_name = str(out_name) out_descrs.append((out_name, dtype, shape)) return out_descrs def common_dtype(cols): """ Use numpy to find the common dtype for a list of structured ndarray columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void """ np_types = (np.bool_, np.object_, np.number, np.character, np.void) uniq_types = { tuple(issubclass(col.dtype.type, np_type) for np_type in np_types) for col in cols } if len(uniq_types) > 1: # Embed into the exception the actual list of incompatible types. incompat_types = [col.dtype.name for col in cols] tme = TableMergeError(f"Columns have incompatible types {incompat_types}") tme._incompat_types = incompat_types raise tme arrs = [np.empty(1, dtype=col.dtype) for col in cols] # For string-type arrays need to explicitly fill in non-zero # values or the final arr_common = .. step is unpredictable. for arr in arrs: if arr.dtype.kind in ("S", "U"): arr[0] = "0" * arr.itemsize arr_common = np.array([arr[0] for arr in arrs]) return arr_common.dtype.str def _check_for_sequence_of_structured_arrays(arrays): err = "`arrays` arg must be a sequence (e.g. list) of structured arrays" if not isinstance(arrays, Sequence): raise TypeError(err) for array in arrays: # Must be structured array if not isinstance(array, np.ndarray) or array.dtype.names is None: raise TypeError(err) if len(arrays) == 0: raise ValueError("`arrays` arg must include at least one array")
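# Minimal usage sketch of the helpers above, applied to two structured arrays
# that share a key column 'a'.  The array contents are made up.
if __name__ == "__main__":
    arr1 = np.array([(1, 2.0)], dtype=[("a", "i4"), ("b", "f8")])
    arr2 = np.array([(3, "x")], dtype=[("a", "i4"), ("c", "U1")])

    _check_for_sequence_of_structured_arrays([arr1, arr2])

    # Maps each output column name to its per-array input names,
    # e.g. 'a' -> ['a', 'a'], 'b' -> ['b', None], 'c' -> [None, 'c'].
    name_map = get_col_name_map([arr1, arr2], common_names=["a"])

    # Dtype descriptors for the merged output, e.g. ('a', '<i4', ()).
    descrs = get_descrs([arr1, arr2], name_map)
    print(name_map, descrs)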
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import numpy from setuptools import Extension ROOT = os.path.relpath(os.path.dirname(__file__)) def get_extensions(): sources = ["_np_utils.pyx", "_column_mixins.pyx"] include_dirs = [numpy.get_include()] exts = [ Extension( name="astropy.table." + os.path.splitext(source)[0], sources=[os.path.join(ROOT, source)], include_dirs=include_dirs, ) for source in sources ] return exts
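# Minimal sketch of compiling the Cython extensions declared above with a bare
# setuptools invocation.  This is illustrative only: astropy's real build is
# orchestrated by its own build machinery (extension-helpers / pyproject.toml),
# so the direct cythonize()/setup() call below is an assumption for
# demonstration and requires Cython plus the .pyx sources to be present.
if __name__ == "__main__":
    from Cython.Build import cythonize
    from setuptools import setup

    setup(ext_modules=cythonize(get_extensions()))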
import copy import json import textwrap from collections import OrderedDict import numpy as np import yaml __all__ = ["get_header_from_yaml", "get_yaml_from_header", "get_yaml_from_table"] class ColumnOrderList(list): """ List of tuples that sorts in a specific order that makes sense for astropy table column attributes. """ def sort(self, *args, **kwargs): super().sort() column_keys = ["name", "unit", "datatype", "format", "description", "meta"] in_dict = dict(self) out_list = [] for key in column_keys: if key in in_dict: out_list.append((key, in_dict[key])) for key, val in self: if key not in column_keys: out_list.append((key, val)) # Clear list in-place del self[:] self.extend(out_list) class ColumnDict(dict): """ Specialized dict subclass to represent attributes of a Column and return items() in a preferred order. This is only for use in generating a YAML map representation that has a fixed order. """ def items(self): """ Return items as a ColumnOrderList, which sorts in the preferred way for column attributes. """ return ColumnOrderList(super().items()) def _construct_odict(load, node): """ Construct OrderedDict from !!omap in yaml safe load. Source: https://gist.github.com/weaver/317164 License: Unspecified This is the same as SafeConstructor.construct_yaml_omap(), except the data type is changed to OrderedDict() and setitem is used instead of append in the loop Examples -------- :: >>> yaml.load(''' # doctest: +SKIP ... !!omap ... - foo: bar ... - mumble: quux ... - baz: gorp ... ''') OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) """ omap = OrderedDict() yield omap if not isinstance(node, yaml.SequenceNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a sequence, but found {node.id}", node.start_mark, ) for subnode in node.value: if not isinstance(subnode, yaml.MappingNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a mapping of length 1, but found {subnode.id}", subnode.start_mark, ) if len(subnode.value) != 1: raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a single mapping item, but found {len(subnode.value)} items", subnode.start_mark, ) key_node, value_node = subnode.value[0] key = load.construct_object(key_node) value = load.construct_object(value_node) omap[key] = value def _repr_pairs(dump, tag, sequence, flow_style=None): """ This is the same code as BaseRepresenter.represent_sequence(), but the value passed to dump.represent_data() in the loop is a dictionary instead of a tuple. Source: https://gist.github.com/weaver/317164 License: Unspecified """ value = [] node = yaml.SequenceNode(tag, value, flow_style=flow_style) if dump.alias_key is not None: dump.represented_objects[dump.alias_key] = node best_style = True for key, val in sequence: item = dump.represent_data({key: val}) if not (isinstance(item, yaml.ScalarNode) and not item.style): best_style = False value.append(item) if flow_style is None: if dump.default_flow_style is not None: node.flow_style = dump.default_flow_style else: node.flow_style = best_style return node def _repr_odict(dumper, data): """ Represent OrderedDict in yaml dump. 
Source: https://gist.github.com/weaver/317164 License: Unspecified >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n' >>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP '!!omap [foo: bar, mumble: quux, baz: gorp]\\n' """ return _repr_pairs(dumper, "tag:yaml.org,2002:omap", data.items()) def _repr_column_dict(dumper, data): """ Represent ColumnDict in yaml dump. This is the same as an ordinary mapping except that the keys are written in a fixed order that makes sense for astropy table columns. """ return dumper.represent_mapping("tag:yaml.org,2002:map", data) def _get_variable_length_array_shape(col): """Check if object-type ``col`` is really a variable length list. That is true if the object consists purely of list of nested lists, where the shape of every item can be represented as (m, n, ..., *) where the (m, n, ...) are constant and only the lists in the last axis have variable shape. If so the returned value of shape will be a tuple in the form (m, n, ..., None). If ``col`` is a variable length array then the return ``dtype`` corresponds to the type found by numpy for all the individual values. Otherwise it will be ``np.dtype(object)``. Parameters ========== col : column-like Input table column, assumed to be object-type Returns ======= shape : tuple Inferred variable length shape or None dtype : np.dtype Numpy dtype that applies to col """ class ConvertError(ValueError): """Local conversion error used below""" # Numpy types supported as variable-length arrays np_classes = (np.floating, np.integer, np.bool_, np.unicode_) try: if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col): raise ConvertError dtype = col[0].dtype shape = col[0].shape[:-1] for val in col: if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape: raise ConvertError dtype = np.promote_types(dtype, val.dtype) shape = shape + (None,) except ConvertError: # `col` is not a variable length array, return shape and dtype to # the original. Note that this function is only called if # col.shape[1:] was () and col.info.dtype is object. dtype = col.info.dtype shape = () return shape, dtype def _get_datatype_from_dtype(dtype): """Return string version of ``dtype`` for writing to ECSV ``datatype``""" datatype = dtype.name if datatype.startswith(("bytes", "str")): datatype = "string" if datatype.endswith("_"): datatype = datatype[:-1] # string_ and bool_ lose the final _ for ECSV return datatype def _get_col_attributes(col): """ Extract information from a column (apart from the values) that is required to fully serialize the column. 
Parameters ---------- col : column-like Input Table column Returns ------- attrs : dict Dict of ECSV attributes for ``col`` """ dtype = col.info.dtype # Type of column values that get written subtype = None # Type of data for object columns serialized with JSON shape = col.shape[1:] # Shape of multidim / variable length columns if dtype.name == "object": if shape == (): # 1-d object type column might be a variable length array dtype = np.dtype(str) shape, subtype = _get_variable_length_array_shape(col) else: # N-d object column is subtype object but serialized as JSON string dtype = np.dtype(str) subtype = np.dtype(object) elif shape: # N-d column which is not object is serialized as JSON string dtype = np.dtype(str) subtype = col.info.dtype datatype = _get_datatype_from_dtype(dtype) # Set the output attributes attrs = ColumnDict() attrs["name"] = col.info.name attrs["datatype"] = datatype for attr, nontrivial, xform in ( ("unit", lambda x: x is not None, str), ("format", lambda x: x is not None, None), ("description", lambda x: x is not None, None), ("meta", lambda x: x, None), ): col_attr = getattr(col.info, attr) if nontrivial(col_attr): attrs[attr] = xform(col_attr) if xform else col_attr if subtype: attrs["subtype"] = _get_datatype_from_dtype(subtype) # Numpy 'object' maps to 'subtype' of 'json' in ECSV if attrs["subtype"] == "object": attrs["subtype"] = "json" if shape: attrs["subtype"] += json.dumps(list(shape), separators=(",", ":")) return attrs def get_yaml_from_table(table): """ Return lines with a YAML representation of header content from the ``table``. Parameters ---------- table : `~astropy.table.Table` object Table for which header content is output Returns ------- lines : list List of text lines with YAML header content """ header = {"cols": list(table.columns.values())} if table.meta: header["meta"] = table.meta return get_yaml_from_header(header) def get_yaml_from_header(header): """ Return lines with a YAML representation of header content from a Table. The ``header`` dict must contain these keys: - 'cols' : list of table column objects (required) - 'meta' : table 'meta' attribute (optional) Other keys included in ``header`` will be serialized in the output YAML representation. Parameters ---------- header : dict Table header content Returns ------- lines : list List of text lines with YAML header content """ from astropy.io.misc.yaml import AstropyDumper class TableDumper(AstropyDumper): """ Custom Dumper that represents OrderedDict as an !!omap object. """ def represent_mapping(self, tag, mapping, flow_style=None): """ This is a combination of the Python 2 and 3 versions of this method in the PyYAML library to allow the required key ordering via the ColumnOrderList object. The Python 3 version insists on turning the items() mapping into a list object and sorting, which results in alphabetical order for the column keys. 
""" value = [] node = yaml.MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, "items"): mapping = mapping.items() if hasattr(mapping, "sort"): mapping.sort() else: mapping = list(mapping) try: mapping = sorted(mapping) except TypeError: pass for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): best_style = False if not ( isinstance(node_value, yaml.ScalarNode) and not node_value.style ): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node TableDumper.add_representer(OrderedDict, _repr_odict) TableDumper.add_representer(ColumnDict, _repr_column_dict) header = copy.copy(header) # Don't overwrite original header["datatype"] = [_get_col_attributes(col) for col in header["cols"]] del header["cols"] lines = yaml.dump( header, default_flow_style=None, Dumper=TableDumper, width=130 ).splitlines() return lines class YamlParseError(Exception): pass def get_header_from_yaml(lines): """ Get a header dict from input ``lines`` which should be valid YAML. This input will typically be created by get_yaml_from_header. The output is a dictionary which describes all the table and column meta. The get_cols() method in the io/ascii/ecsv.py file should be used as a guide to using the information when constructing a table using this header dict information. Parameters ---------- lines : list List of text lines with YAML header content Returns ------- header : dict Dictionary describing table and column meta """ from astropy.io.misc.yaml import AstropyLoader class TableLoader(AstropyLoader): """ Custom Loader that constructs OrderedDict from an !!omap object. This does nothing but provide a namespace for adding the custom odict constructor. """ TableLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict) # Now actually load the YAML data structure into `meta` header_yaml = textwrap.dedent("\n".join(lines)) try: header = yaml.load(header_yaml, Loader=TableLoader) except Exception as err: raise YamlParseError() from err return header
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np def _searchsorted(array, val, side="left"): """ Call np.searchsorted or use a custom binary search if necessary. """ if hasattr(array, "searchsorted"): return array.searchsorted(val, side=side) # Python binary search begin = 0 end = len(array) while begin < end: mid = (begin + end) // 2 if val > array[mid]: begin = mid + 1 elif val < array[mid]: end = mid elif side == "right": begin = mid + 1 else: end = mid return begin class SortedArray: """ Implements a sorted array container using a list of numpy arrays. Parameters ---------- data : Table Sorted columns of the original table row_index : Column object Row numbers corresponding to data columns unique : bool Whether the values of the index must be unique. Defaults to False. """ def __init__(self, data, row_index, unique=False): self.data = data self.row_index = row_index self.num_cols = len(getattr(data, "colnames", [])) self.unique = unique @property def cols(self): return list(self.data.columns.values()) def add(self, key, row): """ Add a new entry to the sorted array. Parameters ---------- key : tuple Column values at the given row row : int Row number """ pos = self.find_pos(key, row) # first >= key if ( self.unique and 0 <= pos < len(self.row_index) and all(self.data[pos][i] == key[i] for i in range(len(key))) ): # already exists raise ValueError(f'Cannot add duplicate value "{key}" in a unique index') self.data.insert_row(pos, key) self.row_index = self.row_index.insert(pos, row) def _get_key_slice(self, i, begin, end): """ Retrieve the ith slice of the sorted array from begin to end. """ if i < self.num_cols: return self.cols[i][begin:end] else: return self.row_index[begin:end] def find_pos(self, key, data, exact=False): """ Return the index of the largest key in data greater than or equal to the given key, data pair. Parameters ---------- key : tuple Column key data : int Row number exact : bool If True, return the index of the given key in data or -1 if the key is not present. """ begin = 0 end = len(self.row_index) num_cols = self.num_cols if not self.unique: # consider the row value as well key = key + (data,) num_cols += 1 # search through keys in lexicographic order for i in range(num_cols): key_slice = self._get_key_slice(i, begin, end) t = _searchsorted(key_slice, key[i]) # t is the smallest index >= key[i] if exact and (t == len(key_slice) or key_slice[t] != key[i]): # no match return -1 elif t == len(key_slice) or ( t == 0 and len(key_slice) > 0 and key[i] < key_slice[0] ): # too small or too large return begin + t end = begin + _searchsorted(key_slice, key[i], side="right") begin += t if begin >= len(self.row_index): # greater than all keys return begin return begin def find(self, key): """ Find all rows matching the given key. 
Parameters ---------- key : tuple Column values Returns ------- matching_rows : list List of rows matching the input key """ begin = 0 end = len(self.row_index) # search through keys in lexicographic order for i in range(self.num_cols): key_slice = self._get_key_slice(i, begin, end) t = _searchsorted(key_slice, key[i]) # t is the smallest index >= key[i] if t == len(key_slice) or key_slice[t] != key[i]: # no match return [] elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]: # too small or too large return [] end = begin + _searchsorted(key_slice, key[i], side="right") begin += t if begin >= len(self.row_index): # greater than all keys return [] return self.row_index[begin:end] def range(self, lower, upper, bounds): """ Find values in the given range. Parameters ---------- lower : tuple Lower search bound upper : tuple Upper search bound bounds : (2,) tuple of bool Indicates whether the search should be inclusive or exclusive with respect to the endpoints. The first argument corresponds to an inclusive lower bound, and the second argument to an inclusive upper bound. """ lower_pos = self.find_pos(lower, 0) upper_pos = self.find_pos(upper, 0) if lower_pos == len(self.row_index): return [] lower_bound = tuple(col[lower_pos] for col in self.cols) if not bounds[0] and lower_bound == lower: lower_pos += 1 # data[lower_pos] > lower # data[lower_pos] >= lower # data[upper_pos] >= upper if upper_pos < len(self.row_index): upper_bound = tuple(col[upper_pos] for col in self.cols) if not bounds[1] and upper_bound == upper: upper_pos -= 1 # data[upper_pos] < upper elif upper_bound > upper: upper_pos -= 1 # data[upper_pos] <= upper return self.row_index[lower_pos : upper_pos + 1] def remove(self, key, data): """ Remove the given entry from the sorted array. Parameters ---------- key : tuple Column values data : int Row number Returns ------- successful : bool Whether the entry was successfully removed """ pos = self.find_pos(key, data, exact=True) if pos == -1: # key not found return False self.data.remove_row(pos) keep_mask = np.ones(len(self.row_index), dtype=bool) keep_mask[pos] = False self.row_index = self.row_index[keep_mask] return True def shift_left(self, row): """ Decrement all row numbers greater than the input row. Parameters ---------- row : int Input row number """ self.row_index[self.row_index > row] -= 1 def shift_right(self, row): """ Increment all row numbers greater than or equal to the input row. Parameters ---------- row : int Input row number """ self.row_index[self.row_index >= row] += 1 def replace_rows(self, row_map): """ Replace all rows with the values they map to in the given dictionary. Any rows not present as keys in the dictionary will have their entries deleted. Parameters ---------- row_map : dict Mapping of row numbers to new row numbers """ num_rows = len(row_map) keep_rows = np.zeros(len(self.row_index), dtype=bool) tagged = 0 for i, row in enumerate(self.row_index): if row in row_map: keep_rows[i] = True tagged += 1 if tagged == num_rows: break self.data = self.data[keep_rows] self.row_index = np.array([row_map[x] for x in self.row_index[keep_rows]]) def items(self): """ Retrieve all array items as a list of pairs of the form [(key, [row 1, row 2, ...]), ...] """ array = [] last_key = None for i, key in enumerate(zip(*self.data.columns.values())): row = self.row_index[i] if key == last_key: array[-1][1].append(row) else: last_key = key array.append((key, [row])) return array def sort(self): """ Make row order align with key order. 
""" self.row_index = np.arange(len(self.row_index)) def sorted_data(self): """ Return rows in sorted order. """ return self.row_index def __getitem__(self, item): """ Return a sliced reference to this sorted array. Parameters ---------- item : slice Slice to use for referencing """ return SortedArray(self.data[item], self.row_index[item]) def __repr__(self): t = self.data.copy() t["rows"] = self.row_index return f"<{self.__class__.__name__} length={len(t)}>\n{t}"
# Licensed under a 3-clause BSD style license - see LICENSE.rst from collections import OrderedDict from copy import deepcopy from importlib import import_module import numpy as np from astropy.units.quantity import QuantityInfo from astropy.utils.data_info import MixinInfo from .column import Column, MaskedColumn from .table import QTable, Table, has_info_class # TODO: some of this might be better done programmatically, through # code like # __construct_mixin_classes += tuple( # f'astropy.coordinates.representation.{cls.__name__}' # for cls in (list(coorep.REPRESENTATION_CLASSES.values()) # + list(coorep.DIFFERENTIAL_CLASSES.values())) # if cls.__name__ in coorep.__all__) # However, to avoid very hard to track import issues, the definition # should then be done at the point where it is actually needed, # using local imports. See also # https://github.com/astropy/astropy/pull/10210#discussion_r419087286 __construct_mixin_classes = ( "astropy.time.core.Time", "astropy.time.core.TimeDelta", "astropy.units.quantity.Quantity", "astropy.units.function.logarithmic.Magnitude", "astropy.units.function.logarithmic.Decibel", "astropy.units.function.logarithmic.Dex", "astropy.coordinates.angles.Latitude", "astropy.coordinates.angles.Longitude", "astropy.coordinates.angles.Angle", "astropy.coordinates.distances.Distance", "astropy.coordinates.earth.EarthLocation", "astropy.coordinates.sky_coordinate.SkyCoord", "astropy.table.ndarray_mixin.NdarrayMixin", "astropy.table.table_helpers.ArrayWrapper", "astropy.table.column.Column", "astropy.table.column.MaskedColumn", "astropy.coordinates.representation.CartesianRepresentation", "astropy.coordinates.representation.UnitSphericalRepresentation", "astropy.coordinates.representation.RadialRepresentation", "astropy.coordinates.representation.SphericalRepresentation", "astropy.coordinates.representation.PhysicsSphericalRepresentation", "astropy.coordinates.representation.CylindricalRepresentation", "astropy.coordinates.representation.CartesianDifferential", "astropy.coordinates.representation.UnitSphericalDifferential", "astropy.coordinates.representation.SphericalDifferential", "astropy.coordinates.representation.UnitSphericalCosLatDifferential", "astropy.coordinates.representation.SphericalCosLatDifferential", "astropy.coordinates.representation.RadialDifferential", "astropy.coordinates.representation.PhysicsSphericalDifferential", "astropy.coordinates.representation.CylindricalDifferential", "astropy.utils.masked.core.MaskedNDArray", ) class SerializedColumnInfo(MixinInfo): """ Minimal info to allow SerializedColumn to be recognized as a mixin Column. Used to help create a dict of columns in ColumnInfo for structured data. """ def _represent_as_dict(self): # SerializedColumn is already a `dict`, so we can return it directly. return self._parent class SerializedColumn(dict): """Subclass of dict used to serialize mixin columns. It is used in the representation to contain the name and possible other info for a mixin column or attribute (either primary data or an array-like attribute) that is serialized as a column in the table. """ info = SerializedColumnInfo() @property def shape(self): """Minimal shape implementation to allow use as a mixin column. Returns the shape of the first item that has a shape at all, or ``()`` if none of the values has a shape attribute. 
""" return next( (value.shape for value in self.values() if hasattr(value, "shape")), () ) def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()): """Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized. """ obj_attrs = col.info._represent_as_dict() # If serialization is not required (see function docstring above) # or explicitly specified as excluded, then treat as a normal column. if not obj_attrs or col.__class__ in exclude_classes: new_cols.append(col) return # Subtlety here is handling mixin info attributes. The basic list of such # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. # - name: handled directly [DON'T store] # - unit: DON'T store if this is a parent attribute # - dtype: captured in plain Column if relevant [DON'T store] # - format: possibly irrelevant but settable post-object creation [DO store] # - description: DO store # - meta: DO store info = {} for attr, nontrivial in ( ("unit", lambda x: x is not None and x != ""), ("format", lambda x: x is not None), ("description", lambda x: x is not None), ("meta", lambda x: x), ): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr # Find column attributes that have the same length as the column itself. # These will be stored in the table as new columns (aka "data attributes"). # Examples include SkyCoord.ra (what is typically considered the data and is # always an array) and Skycoord.obs_time (which can be a scalar or an # array). data_attrs = [ key for key, value in obj_attrs.items() if getattr(value, "shape", ())[:1] == col.shape[:1] ] for data_attr in data_attrs: data = obj_attrs[data_attr] # New column name combines the old name and attribute # (e.g. skycoord.ra, skycoord.dec).unless it is the primary data # attribute for the column (e.g. value for Quantity or data for # MaskedColumn). For primary data, we attempt to store any info on # the format, etc., on the column, but not for ancillary data (e.g., # no sense to use a float format for a mask). is_primary = data_attr == col.info._represent_as_dict_primary_data if is_primary: new_name = name new_info = info else: new_name = name + "." 
+ data_attr new_info = {} if not has_info_class(data, MixinInfo): col_cls = ( MaskedColumn if (hasattr(data, "mask") and np.any(data.mask)) else Column ) data = col_cls(data, name=new_name, **new_info) if is_primary: # Don't store info in the __serialized_columns__ dict for this column # since this is redundant with info stored on the new column. info = {} # Recurse. If this is anything that needs further serialization (i.e., # a Mixin column, a structured Column, a MaskedColumn for which mask is # stored, etc.), it will define obj_attrs[new_name]. Otherwise, it will # just add to new_cols and all we have to do is to link to the new name. _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) obj_attrs[data_attr] = SerializedColumn( obj_attrs.pop(new_name, {"name": new_name}) ) # Strip out from info any attributes defined by the parent, # and store whatever remains. for attr in col.info.attrs_from_parent: if attr in info: del info[attr] if info: obj_attrs["__info__"] = info # Store the fully qualified class name if not isinstance(col, SerializedColumn): obj_attrs.setdefault("__class__", col.__module__ + "." + col.__class__.__name__) mixin_cols[name] = obj_attrs def represent_mixins_as_columns(tbl, exclude_classes=()): """Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of class Exclude any mixin columns which are instannces of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0 """ # Dict of metadata for serializing each column, keyed by column name. # Gets filled in place by _represent_mixin_as_column(). mixin_cols = {} # List of columns for the output table. For plain Column objects # this will just be the original column object. 
new_cols = [] # Go through table columns and represent each column as one or more # plain Column objects (in new_cols) + metadata (in mixin_cols). for col in tbl.itercols(): _represent_mixin_as_column( col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes ) # If no metadata was created then just return the original table. if mixin_cols: meta = deepcopy(tbl.meta) meta["__serialized_columns__"] = mixin_cols out = Table(new_cols, meta=meta, copy=False) else: out = tbl for col in out.itercols(): if not isinstance(col, Column) and col.__class__ not in exclude_classes: # This catches columns for which info has not been set up right and # therefore were not converted. See the corresponding test in # test_mixin.py for an example. raise TypeError( "failed to represent column " f"{col.info.name!r} ({col.__class__.__name__}) as one " "or more Column subclasses. This looks like a mixin class " "that does not have the correct _represent_as_dict() method " "in the class `info` attribute." ) return out def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info): # If this is a supported class then import the class and run # the _construct_from_col method. Prevent accidentally running # untrusted code by only importing known astropy classes. cls_full_name = obj_attrs.pop("__class__", None) if cls_full_name is None: # We're dealing with a SerializedColumn holding columns, stored in # obj_attrs. For this case, info holds the name (and nothing else). mixin = SerializedColumn(obj_attrs) mixin.info.name = info["name"] return mixin if cls_full_name not in __construct_mixin_classes: raise ValueError(f"unsupported class for construct {cls_full_name}") mod_name, _, cls_name = cls_full_name.rpartition(".") module = import_module(mod_name) cls = getattr(module, cls_name) for attr, value in info.items(): if attr in cls.info.attrs_from_parent: obj_attrs[attr] = value mixin = cls.info._construct_from_dict(obj_attrs) for attr, value in info.items(): if attr not in obj_attrs: setattr(mixin.info, attr, value) return mixin class _TableLite(OrderedDict): """ Minimal table-like object for _construct_mixin_from_columns. This allows manipulating the object like a Table but without the actual overhead for a full Table. More pressing, there is an issue with constructing MaskedColumn, where the encoded Column components (data, mask) are turned into a MaskedColumn. When this happens in a real table then all other columns are immediately Masked and a warning is issued. This is not desirable. """ def add_column(self, col, index=0): colnames = self.colnames self[col.info.name] = col for ii, name in enumerate(colnames): if ii >= index: self.move_to_end(name) @property def colnames(self): return list(self.keys()) def itercols(self): return self.values() def _construct_mixin_from_columns(new_name, obj_attrs, out): data_attrs_map = {} for name, val in obj_attrs.items(): if isinstance(val, SerializedColumn): # A SerializedColumn can just link to a serialized column using a name # (e.g., time.jd1), or itself be a mixin (e.g., coord.obstime). Note # that in principle a mixin could have include a column called 'name', # hence we check whether the value is actually a string (see gh-13232). 
if "name" in val and isinstance(val["name"], str): data_attrs_map[val["name"]] = name else: out_name = f"{new_name}.{name}" _construct_mixin_from_columns(out_name, val, out) data_attrs_map[out_name] = name for name in data_attrs_map.values(): del obj_attrs[name] # The order of data_attrs_map may not match the actual order, as it is set # by the yaml description. So, sort names by position in the serialized table. # Keep the index of the first column, so we can insert the new one there later. names = sorted(data_attrs_map, key=out.colnames.index) idx = out.colnames.index(names[0]) # Name is the column name in the table (e.g. "coord.ra") and # data_attr is the object attribute name (e.g. "ra"). A different # example would be a formatted time object that would have (e.g.) # "time_col" and "value", respectively. for name in names: obj_attrs[data_attrs_map[name]] = out[name] del out[name] info = obj_attrs.pop("__info__", {}) if len(names) == 1: # col is the first and only serialized column; in that case, use info # stored on the column. First step is to get that first column which # has been moved from `out` to `obj_attrs` above. col = obj_attrs[data_attrs_map[name]] # Now copy the relevant attributes for attr, nontrivial in ( ("unit", lambda x: x not in (None, "")), ("format", lambda x: x is not None), ("description", lambda x: x is not None), ("meta", lambda x: x), ): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr info["name"] = new_name col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info) out.add_column(col, index=idx) def _construct_mixins_from_columns(tbl): if "__serialized_columns__" not in tbl.meta: return tbl meta = tbl.meta.copy() mixin_cols = meta.pop("__serialized_columns__") out = _TableLite(tbl.columns) for new_name, obj_attrs in mixin_cols.items(): _construct_mixin_from_columns(new_name, obj_attrs, out) # If no quantity subclasses are in the output then output as Table. # For instance ascii.read(file, format='ecsv') doesn't specify an # output class and should return the minimal table class that # represents the table file. has_quantities = any(isinstance(col.info, QuantityInfo) for col in out.itercols()) out_cls = QTable if has_quantities else Table return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
ascii_coded = ( "Ò♙♙♙♙♙♙♙♙♌♐♐♌♙♙♙♙♙♙♌♌♙♙Ò♙♙♙♙♙♙♙♘♐♐♐♈♙♙♙♙♙♌♐♐♐♔Ò♙♙♌♈♙♙♌♐♈♈♙♙♙♙♙♙♙♙♈♐♐♙Ò♙♐♙♙♙♐♐♙♙♙" "♙♙♙♙♙♙♙♙♙♙♙♙Ò♐♔♙♙♘♐♐♙♙♌♐♐♔♙♙♌♌♌♙♙♙♌Ò♐♐♙♙♘♐♐♌♙♈♐♈♙♙♙♈♐♐♙♙♘♔Ò♐♐♌♙♘♐♐♐♌♌♙♙♌♌♌♙♈♈♙♌♐" "♐Ò♘♐♐♐♌♐♐♐♐♐♐♌♙♈♙♌♐♐♐♐♐♔Ò♘♐♐♐♐♐♐♐♐♐♐♐♐♈♈♐♐♐♐♐♐♙Ò♙♘♐♐♐♐♈♐♐♐♐♐♐♙♙♐♐♐♐♐♙♙Ò♙♙♙♈♈♈♙♙♐" "♐♐♐♐♔♙♐♐♐♐♈♙♙Ò♙♙♙♙♙♙♙♙♙♈♈♐♐♐♙♈♈♈♙♙♙♙Ò" ) ascii_uncoded = "".join([chr(ord(c) - 200) for c in ascii_coded]) url = "https://media.giphy.com/media/e24Q8FKE2mxRS/giphy.gif" message_coded = "ĘĩĶĬĩĻ÷ĜĩĪĴĭèıĶļĭĺĩīļıķĶ" message_uncoded = "".join([chr(ord(c) - 200) for c in message_coded]) try: from IPython import display html = display.Image(url=url)._repr_html_() class HTMLWithBackup(display.HTML): def __init__(self, data, backup_text): super().__init__(data) self.backup_text = backup_text def __repr__(self): if self.backup_text is None: return super().__repr__() else: return self.backup_text dhtml = HTMLWithBackup(html, ascii_uncoded) display.display(dhtml) except ImportError: print(ascii_uncoded) except (UnicodeEncodeError, SyntaxError): pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Helper functions for table development, mostly creating useful tables for testing. """ import string from itertools import cycle import numpy as np from astropy.utils.data_info import ParentDtypeInfo from .table import Column, Table class TimingTables: """ Object which contains two tables and various other attributes that are useful for timing and other API tests. """ def __init__(self, size=1000, masked=False): self.masked = masked # Initialize table self.table = Table(masked=self.masked) # Create column with mixed types np.random.seed(12345) self.table["i"] = np.arange(size) self.table["a"] = np.random.random(size) # float self.table["b"] = np.random.random(size) > 0.5 # bool self.table["c"] = np.random.random((size, 10)) # 2d column self.table["d"] = np.random.choice(np.array(list(string.ascii_letters)), size) self.extra_row = {"a": 1.2, "b": True, "c": np.repeat(1, 10), "d": "Z"} self.extra_column = np.random.randint(0, 100, size) self.row_indices = np.where(self.table["a"] > 0.9)[0] self.table_grouped = self.table.group_by("d") # Another table for testing joining self.other_table = Table(masked=self.masked) self.other_table["i"] = np.arange(1, size, 3) self.other_table["f"] = np.random.random() self.other_table.sort("f") # Another table for testing hstack self.other_table_2 = Table(masked=self.masked) self.other_table_2["g"] = np.random.random(size) self.other_table_2["h"] = np.random.random((size, 10)) self.bool_mask = self.table["a"] > 0.6 def simple_table(size=3, cols=None, kinds="ifS", masked=False): """ Return a simple table for testing. Example -------- :: >>> from astropy.table.table_helpers import simple_table >>> print(simple_table(3, 6, masked=True, kinds='ifOS')) a b c d e f --- --- -------- --- --- --- -- 1.0 {'c': 2} -- 5 5.0 2 2.0 -- e 6 -- 3 -- {'e': 4} f -- 7.0 Parameters ---------- size : int Number of table rows cols : int, optional Number of table columns. Defaults to number of kinds. kinds : str String consisting of the column dtype.kinds. This string will be cycled through to generate the column dtype. The allowed values are 'i', 'f', 'S', 'O'. Returns ------- out : `Table` New table with appropriate characteristics """ if cols is None: cols = len(kinds) if cols > 26: raise ValueError("Max 26 columns in SimpleTable") columns = [] names = [chr(ord("a") + ii) for ii in range(cols)] letters = np.array([c for c in string.ascii_letters]) for jj, kind in zip(range(cols), cycle(kinds)): if kind == "i": data = np.arange(1, size + 1, dtype=np.int64) + jj elif kind == "f": data = np.arange(size, dtype=np.float64) + jj elif kind == "S": indices = (np.arange(size) + jj) % len(letters) data = letters[indices] elif kind == "O": indices = (np.arange(size) + jj) % len(letters) vals = letters[indices] data = [{val: index} for val, index in zip(vals, indices)] else: raise ValueError("Unknown data kind") columns.append(Column(data)) table = Table(columns, names=names, masked=masked) if masked: for ii, col in enumerate(table.columns.values()): mask = np.array((np.arange(size) + ii) % 3, dtype=bool) col.mask = ~mask return table def complex_table(): """ Return a masked table from the io.votable test set that has a wide variety of stressing types. 
""" import warnings from astropy.io.votable.table import parse from astropy.utils.data import get_pkg_data_filename with warnings.catch_warnings(): warnings.simplefilter("ignore") votable = parse( get_pkg_data_filename("../io/votable/tests/data/regression.xml"), pedantic=False, ) first_table = votable.get_first_table() table = first_table.to_table() return table class ArrayWrapperInfo(ParentDtypeInfo): _represent_as_dict_primary_data = "data" def _represent_as_dict(self): """Represent Column as a dict that can be serialized.""" col = self._parent out = {"data": col.data} return out def _construct_from_dict(self, map): """Construct Column from ``map``.""" data = map.pop("data") out = self._parent_cls(data, **map) return out class ArrayWrapper: """ Minimal mixin using a simple wrapper around a numpy array TODO: think about the future of this class as it is mostly for demonstration purposes (of the mixin protocol). Consider taking it out of core and putting it into a tutorial. One advantage of having this in core is that it is getting tested in the mixin testing though it doesn't work for multidim data. """ info = ArrayWrapperInfo() def __init__(self, data, copy=True): self.data = np.array(data, copy=copy) if "info" in getattr(data, "__dict__", ()): self.info = data.info def __getitem__(self, item): if isinstance(item, (int, np.integer)): out = self.data[item] else: out = self.__class__(self.data[item], copy=False) if "info" in self.__dict__: out.info = self.info return out def __setitem__(self, item, value): self.data[item] = value def __len__(self): return len(self.data) def __eq__(self, other): """Minimal equality testing, mostly for mixin unit tests""" if isinstance(other, ArrayWrapper): return self.data == other.data else: return self.data == other @property def dtype(self): return self.data.dtype @property def shape(self): return self.data.shape def __repr__(self): return f"<{self.__class__.__name__} name='{self.info.name}' data={self.data}>"
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The SCEngine class uses the ``sortedcontainers`` package to implement an Index engine for Tables. """ from collections import OrderedDict from itertools import starmap from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS if HAS_SORTEDCONTAINERS: from sortedcontainers import SortedList class Node: __slots__ = ("key", "value") def __init__(self, key, value): self.key = key self.value = value def __lt__(self, other): if other.__class__ is Node: return (self.key, self.value) < (other.key, other.value) return self.key < other def __le__(self, other): if other.__class__ is Node: return (self.key, self.value) <= (other.key, other.value) return self.key <= other def __eq__(self, other): if other.__class__ is Node: return (self.key, self.value) == (other.key, other.value) return self.key == other def __ne__(self, other): if other.__class__ is Node: return (self.key, self.value) != (other.key, other.value) return self.key != other def __gt__(self, other): if other.__class__ is Node: return (self.key, self.value) > (other.key, other.value) return self.key > other def __ge__(self, other): if other.__class__ is Node: return (self.key, self.value) >= (other.key, other.value) return self.key >= other __hash__ = None def __repr__(self): return f"Node({self.key!r}, {self.value!r})" class SCEngine: """ Fast tree-based implementation for indexing, using the ``sortedcontainers`` package. Parameters ---------- data : Table Sorted columns of the original table row_index : Column object Row numbers corresponding to data columns unique : bool Whether the values of the index must be unique. Defaults to False. """ def __init__(self, data, row_index, unique=False): if not HAS_SORTEDCONTAINERS: raise ImportError("sortedcontainers is needed for using SCEngine") node_keys = map(tuple, data) self._nodes = SortedList(starmap(Node, zip(node_keys, row_index))) self._unique = unique def add(self, key, value): """ Add a key, value pair. """ if self._unique and (key in self._nodes): message = f"duplicate {key!r} in unique index" raise ValueError(message) self._nodes.add(Node(key, value)) def find(self, key): """ Find rows corresponding to the given key. """ return [node.value for node in self._nodes.irange(key, key)] def remove(self, key, data=None): """ Remove data from the given key. """ if data is not None: item = Node(key, data) try: self._nodes.remove(item) except ValueError: return False return True items = list(self._nodes.irange(key, key)) for item in items: self._nodes.remove(item) return bool(items) def shift_left(self, row): """ Decrement rows larger than the given row. """ for node in self._nodes: if node.value > row: node.value -= 1 def shift_right(self, row): """ Increment rows greater than or equal to the given row. """ for node in self._nodes: if node.value >= row: node.value += 1 def items(self): """ Return a list of key, data tuples. """ result = OrderedDict() for node in self._nodes: if node.key in result: result[node.key].append(node.value) else: result[node.key] = [node.value] return result.items() def sort(self): """ Make row order align with key order. """ for index, node in enumerate(self._nodes): node.value = index def sorted_data(self): """ Return a list of rows in order sorted by key. """ return [node.value for node in self._nodes] def range(self, lower, upper, bounds=(True, True)): """ Return row values in the given range. 
""" iterator = self._nodes.irange(lower, upper, bounds) return [node.value for node in iterator] def replace_rows(self, row_map): """ Replace rows with the values in row_map. """ nodes = [node for node in self._nodes if node.value in row_map] for node in nodes: node.value = row_map[node.value] self._nodes.clear() self._nodes.update(nodes) def __repr__(self): if len(self._nodes) > 6: nodes = list(self._nodes[:3]) + ["..."] + list(self._nodes[-3:]) else: nodes = self._nodes nodes_str = ", ".join(str(node) for node in nodes) return f"<{self.__class__.__name__} nodes={nodes_str}>"
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy.utils.data_info import ParentDtypeInfo class NdarrayMixinInfo(ParentDtypeInfo): _represent_as_dict_primary_data = "data" def _represent_as_dict(self): """Represent Column as a dict that can be serialized.""" col = self._parent out = {"data": col.view(np.ndarray)} return out def _construct_from_dict(self, map): """Construct Column from ``map``.""" data = map.pop("data") out = self._parent_cls(data, **map) return out class NdarrayMixin(np.ndarray): """ Mixin column class to allow storage of arbitrary numpy ndarrays within a Table. This is a subclass of numpy.ndarray and has the same initialization options as ``np.array()``. """ info = NdarrayMixinInfo() def __new__(cls, obj, *args, **kwargs): self = np.array(obj, *args, **kwargs).view(cls) if "info" in getattr(obj, "__dict__", ()): self.info = obj.info return self def __array_finalize__(self, obj): if obj is None: return if callable(super().__array_finalize__): super().__array_finalize__(obj) # Self was created from template (e.g. obj[slice] or (obj * 2)) # or viewcast e.g. obj.view(Column). In either case we want to # init Column attributes for self from obj if possible. if "info" in getattr(obj, "__dict__", ()): self.info = obj.info def __reduce__(self): # patch to pickle NdArrayMixin objects (ndarray subclasses), see # http://www.mail-archive.com/[email protected]/msg02446.html object_state = list(super().__reduce__()) object_state[2] = (object_state[2], self.__dict__) return tuple(object_state) def __setstate__(self, state): # patch to unpickle NdarrayMixin objects (ndarray subclasses), see # http://www.mail-archive.com/[email protected]/msg02446.html nd_state, own_state = state super().__setstate__(nd_state) self.__dict__.update(own_state)
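# Minimal usage sketch: NdarrayMixin lets an arbitrary ndarray (here a
# structured array) live inside a Table as a single mixin column.  The values
# are made up.
if __name__ == "__main__":
    from astropy.table import Table

    rec = np.array([(1, "a"), (2, "b")], dtype=[("i", "i4"), ("s", "U1")])
    col = NdarrayMixin(rec)

    t = Table([col, [10.0, 20.0]], names=["rec", "x"])
    print(t["rec"]["i"])  # field access still works: it is an ndarray subclass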
# Licensed under a 3-clause BSD style license - see LICENSE.rst import math import numpy as np from astropy.modeling import models from astropy.modeling.core import Fittable1DModel, Fittable2DModel from .core import Kernel, Kernel1D, Kernel2D from .utils import has_even_axis, raise_even_kernel_exception __all__ = [ "Gaussian1DKernel", "Gaussian2DKernel", "CustomKernel", "Box1DKernel", "Box2DKernel", "Tophat2DKernel", "Trapezoid1DKernel", "RickerWavelet1DKernel", "RickerWavelet2DKernel", "AiryDisk2DKernel", "Moffat2DKernel", "Model1DKernel", "Model2DKernel", "TrapezoidDisk2DKernel", "Ring2DKernel", ] def _round_up_to_odd_integer(value): i = math.ceil(value) if i % 2 == 0: return i + 1 else: return i class Gaussian1DKernel(Kernel1D): """ 1D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. The generated kernel is normalized so that it integrates to 1. Parameters ---------- stddev : number Standard deviation of the Gaussian kernel. x_size : int, optional Size of the kernel array. Default = ⌊8*stddev+1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. Very slow. factor : number, optional Factor of oversampling. Default factor = 10. If the factor is too large, evaluation can be very slow. See Also -------- Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian1DKernel gauss_1D_kernel = Gaussian1DKernel(10) plt.plot(gauss_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = False def __init__(self, stddev, **kwargs): self._model = models.Gaussian1D(1.0 / (np.sqrt(2 * np.pi) * stddev), 0, stddev) self._default_size = _round_up_to_odd_integer(8 * stddev) super().__init__(**kwargs) self.normalize() class Gaussian2DKernel(Kernel2D): """ 2D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. The generated kernel is normalized so that it integrates to 1. Parameters ---------- x_stddev : float Standard deviation of the Gaussian in x before rotating by theta. y_stddev : float Standard deviation of the Gaussian in y before rotating by theta. theta : float or `~astropy.units.Quantity` ['angle'] Rotation angle. If passed as a float, it is assumed to be in radians. The rotation angle increases counterclockwise. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. 
* 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian2DKernel gaussian_2D_kernel = Gaussian2DKernel(10) plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = False def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs): if y_stddev is None: y_stddev = x_stddev self._model = models.Gaussian2D( amplitude=1.0 / (2 * np.pi * x_stddev * y_stddev), x_mean=0, y_mean=0, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, ) self._default_size = _round_up_to_odd_integer(8 * np.max([x_stddev, y_stddev])) super().__init__(**kwargs) self.normalize() class Box1DKernel(Kernel1D): """ 1D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifacts when applied repeatedly to the same data. The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. E.g a Box kernel with an effective smoothing of 4 pixel would have the following array: [0.5, 1, 1, 1, 0.5]. Parameters ---------- width : number Width of the filter kernel. mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'linear_interp' (default) Discretize model by linearly interpolating between the values at the corners of the bin. * 'center' Discretize model by taking the value at the center of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response function: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box1DKernel box_1D_kernel = Box1DKernel(9) plt.plot(box_1D_kernel, drawstyle='steps') plt.xlim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box1D(1.0 / width, 0, width) self._default_size = _round_up_to_odd_integer(width) kwargs["mode"] = "linear_interp" super().__init__(**kwargs) self.normalize() class Box2DKernel(Kernel2D): """ 2D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifacts when applied repeatedly to the same data. The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. Parameters ---------- width : number Width of the filter kernel. mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'linear_interp' (default) Discretize model by performing a bilinear interpolation between the values at the corners of the bin. 
* 'center' Discretize model by taking the value at the center of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box2DKernel box_2D_kernel = Box2DKernel(9) plt.imshow(box_2D_kernel, interpolation='none', origin='lower', vmin=0.0, vmax=0.015) plt.xlim(-1, 9) plt.ylim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box2D(1.0 / width**2, 0, 0, width, width) self._default_size = _round_up_to_odd_integer(width) kwargs["mode"] = "linear_interp" super().__init__(**kwargs) self.normalize() class Tophat2DKernel(Kernel2D): """ 2D Tophat filter kernel. The Tophat filter is an isotropic smoothing filter. It can produce artifacts when applied repeatedly on the same data. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : int Radius of the filter kernel. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Tophat2DKernel tophat_2D_kernel = Tophat2DKernel(40) plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius, **kwargs): self._model = models.Disk2D(1.0 / (np.pi * radius**2), 0, 0, radius) self._default_size = _round_up_to_odd_integer(2 * radius) super().__init__(**kwargs) self.normalize() class Ring2DKernel(Kernel2D): """ 2D Ring filter kernel. The Ring filter kernel is the difference between two Tophat kernels of different width. This kernel is useful for, e.g., background estimation. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius_in : number Inner radius of the ring kernel. width : number Width of the ring kernel. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Ring2DKernel ring_2D_kernel = Ring2DKernel(9, 8) plt.imshow(ring_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius_in, width, **kwargs): radius_out = radius_in + width self._model = models.Ring2D( 1.0 / (np.pi * (radius_out**2 - radius_in**2)), 0, 0, radius_in, width ) self._default_size = _round_up_to_odd_integer(2 * radius_out) super().__init__(**kwargs) self.normalize() class Trapezoid1DKernel(Kernel1D): """ 1D trapezoid kernel. The generated kernel is normalized so that it integrates to 1. Parameters ---------- width : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Trapezoid1DKernel trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) plt.plot(trapezoid_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('amplitude') plt.xlim(-1, 28) plt.show() """ _is_bool = False def __init__(self, width, slope=1.0, **kwargs): self._model = models.Trapezoid1D(1, 0, width, slope) self._default_size = _round_up_to_odd_integer(width + 2.0 / slope) super().__init__(**kwargs) self.normalize() class TrapezoidDisk2DKernel(Kernel2D): """ 2D trapezoid kernel. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. 
plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import TrapezoidDisk2DKernel trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, slope=1.0, **kwargs): self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) self._default_size = _round_up_to_odd_integer(2 * radius + 2.0 / slope) super().__init__(**kwargs) self.normalize() class RickerWavelet1DKernel(Kernel1D): """ 1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet1DKernel ricker_1d_kernel = RickerWavelet1DKernel(10) plt.plot(ricker_1d_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _is_bool = True def __init__(self, width, **kwargs): amplitude = 1.0 / (np.sqrt(2 * np.pi) * width**3) self._model = models.RickerWavelet1D(amplitude, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) class RickerWavelet2DKernel(Kernel2D): """ 2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (pi * width ** 4). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. 
x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*width +1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet2DKernel ricker_2d_kernel = RickerWavelet2DKernel(10) plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, width, **kwargs): amplitude = 1.0 / (np.pi * width**4) self._model = models.RickerWavelet2D(amplitude, 0, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) class AiryDisk2DKernel(Kernel2D): """ 2D Airy disk kernel. This kernel models the diffraction pattern of a circular aperture. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : float The radius of the Airy disk kernel (radius of the first zero). x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import AiryDisk2DKernel airydisk_2D_kernel = AiryDisk2DKernel(10) plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, **kwargs): self._model = models.AiryDisk2D(1, 0, 0, radius) self._default_size = _round_up_to_odd_integer(8 * radius) super().__init__(**kwargs) self.normalize() class Moffat2DKernel(Kernel2D): """ 2D Moffat kernel. This kernel is a typical model for a seeing limited PSF. The generated kernel is normalized so that it integrates to 1. Parameters ---------- gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋. y_size : int, optional Size in y direction of the kernel array. 
Default = ⌊8*radius + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Moffat2DKernel moffat_2D_kernel = Moffat2DKernel(3, 2) plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, gamma, alpha, **kwargs): # Compute amplitude, from # https://en.wikipedia.org/wiki/Moffat_distribution amplitude = (alpha - 1.0) / (np.pi * gamma * gamma) self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha) self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm) super().__init__(**kwargs) self.normalize() class Model1DKernel(Kernel1D): """ Create kernel from 1D model. The model has to be centered on x = 0. Parameters ---------- model : `~astropy.modeling.Fittable1DModel` Kernel response function model x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. Must be odd. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable1DModel` See also -------- Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian1D model: >>> from astropy.modeling.models import Gaussian1D >>> from astropy.convolution.kernels import Model1DKernel >>> gauss = Gaussian1D(1, 0, 2) And create a custom one dimensional kernel from it: >>> gauss_kernel = Model1DKernel(gauss, x_size=9) This kernel can now be used like a usual Astropy kernel. """ _separable = False _is_bool = False def __init__(self, model, **kwargs): if isinstance(model, Fittable1DModel): self._model = model else: raise TypeError("Must be Fittable1DModel") super().__init__(**kwargs) class Model2DKernel(Kernel2D): """ Create kernel from 2D model. The model has to be centered on x = 0 and y = 0. Parameters ---------- model : `~astropy.modeling.Fittable2DModel` Kernel response function model x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. Must be odd. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*width +1⌋. 
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable2DModel` See also -------- Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian2D model: >>> from astropy.modeling.models import Gaussian2D >>> from astropy.convolution.kernels import Model2DKernel >>> gauss = Gaussian2D(1, 0, 0, 2, 2) And create a custom two dimensional kernel from it: >>> gauss_kernel = Model2DKernel(gauss, x_size=9) This kernel can now be used like a usual astropy kernel. """ _is_bool = False _separable = False def __init__(self, model, **kwargs): self._separable = False if isinstance(model, Fittable2DModel): self._model = model else: raise TypeError("Must be Fittable2DModel") super().__init__(**kwargs) class CustomKernel(Kernel): """ Create filter kernel from list or array. Parameters ---------- array : list or array Filter kernel array. Size must be odd. Raises ------ TypeError If array is not a list or array. `~astropy.convolution.KernelSizeError` If array size is even. See also -------- Model2DKernel, Model1DKernel Examples -------- Define one dimensional array: >>> from astropy.convolution.kernels import CustomKernel >>> import numpy as np >>> array = np.array([1, 2, 3, 2, 1]) >>> kernel = CustomKernel(array) >>> kernel.dimension 1 Define two dimensional array: >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) >>> kernel = CustomKernel(array) >>> kernel.dimension 2 """ def __init__(self, array): self.array = array super().__init__(self._array) @property def array(self): """ Filter kernel array. """ return self._array @array.setter def array(self, array): """ Filter kernel array setter """ if isinstance(array, np.ndarray): self._array = array.astype(np.float64) elif isinstance(array, list): self._array = np.array(array, dtype=np.float64) else: raise TypeError("Must be list or array.") # Check if array is odd in all axes if has_even_axis(self): raise_even_kernel_exception() # Check if array is bool ones = self._array == 1.0 zeros = self._array == 0 self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
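# --- Usage sketch (illustrative, not part of the module) ---------------------
# A hedged example of the kernel classes defined above: build a Gaussian kernel
# and check its normalization, then wrap a user-defined Fittable1DModel with
# Model1DKernel. The specific sizes and model parameters are illustrative only.
if __name__ == "__main__":
    gauss = Gaussian1DKernel(stddev=2, x_size=17)
    print(gauss.array.sum())  # close to 1.0 after normalize()

    # Model1DKernel requires a Fittable1DModel centered on x = 0 and an odd size.
    box_model = models.Box1D(amplitude=1, x_0=0, width=5)
    box_kernel = Model1DKernel(box_model, x_size=9)
    box_kernel.normalize()
    print(box_kernel.shape)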
ac36fce012b3d3ef966584b377c940e27e691250efb5a0f0c13f62e136d3df73
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains the convolution and filter functionalities of astropy. A few conceptual notes: A filter kernel is mainly characterized by its response function. In the 1D case we speak of "impulse response function", in the 2D case we call it "point spread function". This response function is given for every kernel by an astropy `FittableModel`, which is evaluated on a grid to obtain a filter array, which can then be applied to binned data. The model is centered on the array and should have an amplitude such that the array integrates to one per default. Currently only symmetric 2D kernels are supported. """ import copy import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning from .utils import add_kernel_arrays_1D, add_kernel_arrays_2D, discretize_model MAX_NORMALIZATION = 100 __all__ = ["Kernel", "Kernel1D", "Kernel2D", "kernel_arithmetics"] class Kernel: """ Convolution kernel base class. Parameters ---------- array : ndarray Kernel array. """ _separable = False _is_bool = True _model = None def __init__(self, array): self._array = np.asanyarray(array) @property def truncation(self): """ Absolute deviation of the sum of the kernel array values from one. """ return np.abs(1.0 - self._array.sum()) @property def is_bool(self): """ Indicates if kernel is bool. If the kernel is bool the multiplication in the convolution could be omitted, to increase the performance. """ return self._is_bool @property def model(self): """ Kernel response model. """ return self._model @property def dimension(self): """ Kernel dimension. """ return self.array.ndim @property def center(self): """ Index of the kernel center. """ return [axes_size // 2 for axes_size in self._array.shape] def normalize(self, mode="integral"): """ Normalize the filter kernel. Parameters ---------- mode : {'integral', 'peak'} One of the following modes: * 'integral' (default) Kernel is normalized such that its integral = 1. * 'peak' Kernel is normalized such that its peak = 1. """ if mode == "integral": normalization = self._array.sum() elif mode == "peak": normalization = self._array.max() else: raise ValueError("invalid mode, must be 'integral' or 'peak'") # Warn the user for kernels that sum to zero if normalization == 0: warnings.warn( "The kernel cannot be normalized because it sums to zero.", AstropyUserWarning, ) else: np.divide(self._array, normalization, self._array) self._kernel_sum = self._array.sum() @property def shape(self): """ Shape of the kernel array. """ return self._array.shape @property def separable(self): """ Indicates if the filter kernel is separable. A 2D filter is separable, when its filter array can be written as the outer product of two 1D arrays. If a filter kernel is separable, higher dimension convolutions will be performed by applying the 1D filter array consecutively on every dimension. This is significantly faster, than using a filter array with the same dimension. """ return self._separable @property def array(self): """ Filter kernel array. """ return self._array def __add__(self, kernel): """ Add two filter kernels. """ return kernel_arithmetics(self, kernel, "add") def __sub__(self, kernel): """ Subtract two filter kernels. """ return kernel_arithmetics(self, kernel, "sub") def __mul__(self, value): """ Multiply kernel with number or convolve two kernels. """ return kernel_arithmetics(self, value, "mul") def __rmul__(self, value): """ Multiply kernel with number or convolve two kernels. 
""" return kernel_arithmetics(self, value, "mul") def __array__(self): """ Array representation of the kernel. """ return self._array def __array_wrap__(self, array, context=None): """ Wrapper for multiplication with numpy arrays. """ if type(context[0]) == np.ufunc: return NotImplemented else: return array class Kernel1D(Kernel): """ Base class for 1D filter kernels. Parameters ---------- model : `~astropy.modeling.FittableModel` Model to be evaluated. x_size : int or None, optional Size of the kernel array. Default = ⌊8*width+1⌋. Only used if ``array`` is None. array : ndarray or None, optional Kernel array. width : number Width of the filter kernel. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. """ def __init__(self, model=None, x_size=None, array=None, **kwargs): # Initialize from model if self._model: if array is not None: # Reject "array" keyword for kernel models, to avoid them not being # populated as expected. raise TypeError("Array argument not allowed for kernel models.") if x_size is None: x_size = self._default_size elif x_size != int(x_size): raise TypeError("x_size should be an integer") # Set ranges where to evaluate the model if x_size % 2 == 0: # even kernel x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) else: # odd kernel x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) array = discretize_model(self._model, x_range, **kwargs) # Initialize from array elif array is None: raise TypeError("Must specify either array or model.") super().__init__(array) class Kernel2D(Kernel): """ Base class for 2D filter kernels. Parameters ---------- model : `~astropy.modeling.FittableModel` Model to be evaluated. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width + 1⌋. Only used if ``array`` is None. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*width + 1⌋. Only used if ``array`` is None, array : ndarray or None, optional Kernel array. Default is None. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. width : number Width of the filter kernel. factor : number, optional Factor of oversampling. Default factor = 10. """ def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs): # Initialize from model if self._model: if array is not None: # Reject "array" keyword for kernel models, to avoid them not being # populated as expected. 
raise TypeError("Array argument not allowed for kernel models.") if x_size is None: x_size = self._default_size elif x_size != int(x_size): raise TypeError("x_size should be an integer") if y_size is None: y_size = x_size elif y_size != int(y_size): raise TypeError("y_size should be an integer") # Set ranges where to evaluate the model if x_size % 2 == 0: # even kernel x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) else: # odd kernel x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) if y_size % 2 == 0: # even kernel y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5) else: # odd kernel y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1) array = discretize_model(self._model, x_range, y_range, **kwargs) # Initialize from array elif array is None: raise TypeError("Must specify either array or model.") super().__init__(array) def kernel_arithmetics(kernel, value, operation): """ Add, subtract or multiply two kernels. Parameters ---------- kernel : `astropy.convolution.Kernel` Kernel instance. value : `astropy.convolution.Kernel`, float, or int Value to operate with. operation : {'add', 'sub', 'mul'} One of the following operations: * 'add' Add two kernels * 'sub' Subtract two kernels * 'mul' Multiply kernel with number or convolve two kernels. """ # 1D kernels if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D): if operation == "add": new_array = add_kernel_arrays_1D(kernel.array, value.array) if operation == "sub": new_array = add_kernel_arrays_1D(kernel.array, -value.array) if operation == "mul": raise Exception( "Kernel operation not supported. Maybe you want " "to use convolve(kernel1, kernel2) instead." ) new_kernel = Kernel1D(array=new_array) new_kernel._separable = kernel._separable and value._separable new_kernel._is_bool = kernel._is_bool or value._is_bool # 2D kernels elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D): if operation == "add": new_array = add_kernel_arrays_2D(kernel.array, value.array) if operation == "sub": new_array = add_kernel_arrays_2D(kernel.array, -value.array) if operation == "mul": raise Exception( "Kernel operation not supported. Maybe you want " "to use convolve(kernel1, kernel2) instead." ) new_kernel = Kernel2D(array=new_array) new_kernel._separable = kernel._separable and value._separable new_kernel._is_bool = kernel._is_bool or value._is_bool # kernel and number elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value): if operation == "mul": new_kernel = copy.copy(kernel) new_kernel._array *= value else: raise Exception("Kernel operation not supported.") else: raise Exception("Kernel operation not supported.") return new_kernel
46427387fd8cfd9dcc16e41628e53d3d8482b53df505280284198b26b7e818cb
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from .convolve import (
    convolve,
    convolve_fft,
    convolve_models,
    convolve_models_fft,
    interpolate_replace_nans,
)
from .core import *
from .kernels import *
from .utils import *
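# --- Usage sketch (illustrative, not part of the package __init__) -----------
# A hedged, end-to-end example of the public API re-exported above: smooth a
# small image that contains a NaN, letting convolve() interpolate over it. The
# image size and kernel width are arbitrary demo values.
if __name__ == "__main__":
    import numpy as np

    image = np.ones((9, 9))
    image[4, 4] = np.nan

    kernel = Gaussian2DKernel(x_stddev=1)
    smoothed = convolve(image, kernel, boundary="extend",
                        nan_treatment="interpolate")
    # Expect False: the isolated NaN is replaced by an interpolated value.
    print(np.isnan(smoothed).any())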
d58832d69e7cc2d1625ac72db516d110b176e1e93f968b753fdc8f0257d61f77
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import os
import sys

import numpy
from setuptools import Extension

C_CONVOLVE_PKGDIR = os.path.relpath(os.path.dirname(__file__))

extra_compile_args = ["-UNDEBUG"]
if not sys.platform.startswith("win"):
    extra_compile_args.append("-fPIC")


def get_extensions():
    # Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with
    # clang to report missed optimizations
    sources = [
        os.path.join(C_CONVOLVE_PKGDIR, "_convolve.pyx"),
        os.path.join(C_CONVOLVE_PKGDIR, "src", "convolve.c"),
    ]

    _convolve_ext = Extension(
        name="astropy.convolution._convolve",
        extra_compile_args=extra_compile_args,
        include_dirs=[numpy.get_include()],
        sources=sources,
    )

    return [_convolve_ext]
b90018ade96adb97db6558f29b0b6054cb2198887bc17ed1517a4b6b2f6042f9
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np from astropy.modeling.core import Model, custom_model __all__ = ["discretize_model", "KernelSizeError"] class DiscretizationError(Exception): """ Called when discretization of models goes wrong. """ class KernelSizeError(Exception): """ Called when size of kernels is even. """ def has_even_axis(array): if isinstance(array, (list, tuple)): return not len(array) % 2 else: return any(not axes_size % 2 for axes_size in array.shape) def raise_even_kernel_exception(): raise KernelSizeError("Kernel size must be odd in all axes.") def add_kernel_arrays_1D(array_1, array_2): """ Add two 1D kernel arrays of different size. The arrays are added with the centers lying upon each other. """ if array_1.size > array_2.size: new_array = array_1.copy() center = array_1.size // 2 slice_ = slice(center - array_2.size // 2, center + array_2.size // 2 + 1) new_array[slice_] += array_2 return new_array elif array_2.size > array_1.size: new_array = array_2.copy() center = array_2.size // 2 slice_ = slice(center - array_1.size // 2, center + array_1.size // 2 + 1) new_array[slice_] += array_1 return new_array return array_2 + array_1 def add_kernel_arrays_2D(array_1, array_2): """ Add two 2D kernel arrays of different size. The arrays are added with the centers lying upon each other. """ if array_1.size > array_2.size: new_array = array_1.copy() center = [axes_size // 2 for axes_size in array_1.shape] slice_x = slice( center[1] - array_2.shape[1] // 2, center[1] + array_2.shape[1] // 2 + 1 ) slice_y = slice( center[0] - array_2.shape[0] // 2, center[0] + array_2.shape[0] // 2 + 1 ) new_array[slice_y, slice_x] += array_2 return new_array elif array_2.size > array_1.size: new_array = array_2.copy() center = [axes_size // 2 for axes_size in array_2.shape] slice_x = slice( center[1] - array_1.shape[1] // 2, center[1] + array_1.shape[1] // 2 + 1 ) slice_y = slice( center[0] - array_1.shape[0] // 2, center[0] + array_1.shape[0] // 2 + 1 ) new_array[slice_y, slice_x] += array_1 return new_array return array_2 + array_1 def discretize_model(model, x_range, y_range=None, mode="center", factor=10): """ Function to evaluate analytical model functions on a grid. So far the function can only deal with pixel coordinates. Parameters ---------- model : `~astropy.modeling.Model` or callable. Analytic model function to be discretized. Callables, which are not an instances of `~astropy.modeling.Model` are passed to `~astropy.modeling.custom_model` and then evaluated. x_range : tuple x range in which the model is evaluated. The difference between the upper an lower limit must be a whole number, so that the output array size is well defined. y_range : tuple, optional y range in which the model is evaluated. The difference between the upper an lower limit must be a whole number, so that the output array size is well defined. Necessary only for 2D models. mode : str, optional One of the following modes: * ``'center'`` (default) Discretize model by taking the value at the center of the bin. * ``'linear_interp'`` Discretize model by linearly interpolating between the values at the corners of the bin. For 2D models interpolation is bilinear. * ``'oversample'`` Discretize model by taking the average on an oversampled grid. * ``'integrate'`` Discretize model by integrating the model over the bin using `scipy.integrate.quad`. Very slow. factor : float or int Factor of oversampling. Default = 10. 
Returns ------- array : `numpy.array` Model value array Notes ----- The ``oversample`` mode allows to conserve the integral on a subpixel scale. Here is the example of a normalized Gaussian1D: .. plot:: :include-source: import matplotlib.pyplot as plt import numpy as np from astropy.modeling.models import Gaussian1D from astropy.convolution.utils import discretize_model gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5) y_center = discretize_model(gauss_1D, (-2, 3), mode='center') y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp') y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample') plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum())) plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum())) plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum())) plt.xlabel('pixels') plt.ylabel('value') plt.legend() plt.show() """ if not callable(model): raise TypeError("Model must be callable.") if not isinstance(model, Model): model = custom_model(model)() ndim = model.n_inputs if ndim > 2: raise ValueError("discretize_model only supports 1-d and 2-d models.") if not float(np.diff(x_range)).is_integer(): raise ValueError( "The difference between the upper and lower limit of" " 'x_range' must be a whole number." ) if y_range: if not float(np.diff(y_range)).is_integer(): raise ValueError( "The difference between the upper and lower limit of" " 'y_range' must be a whole number." ) if ndim == 2 and y_range is None: raise ValueError("y range not specified, but model is 2-d") if ndim == 1 and y_range is not None: raise ValueError("y range specified, but model is only 1-d.") if mode == "center": if ndim == 1: return discretize_center_1D(model, x_range) elif ndim == 2: return discretize_center_2D(model, x_range, y_range) elif mode == "linear_interp": if ndim == 1: return discretize_linear_1D(model, x_range) if ndim == 2: return discretize_bilinear_2D(model, x_range, y_range) elif mode == "oversample": if ndim == 1: return discretize_oversample_1D(model, x_range, factor) if ndim == 2: return discretize_oversample_2D(model, x_range, y_range, factor) elif mode == "integrate": if ndim == 1: return discretize_integrate_1D(model, x_range) if ndim == 2: return discretize_integrate_2D(model, x_range, y_range) else: raise DiscretizationError("Invalid mode.") def discretize_center_1D(model, x_range): """ Discretize model by taking the value at the center of the bin. """ x = np.arange(*x_range) return model(x) def discretize_center_2D(model, x_range, y_range): """ Discretize model by taking the value at the center of the pixel. """ x = np.arange(*x_range) y = np.arange(*y_range) x, y = np.meshgrid(x, y) return model(x, y) def discretize_linear_1D(model, x_range): """ Discretize model by performing a linear interpolation. """ # Evaluate model 0.5 pixel outside the boundaries x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) values_intermediate_grid = model(x) return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1]) def discretize_bilinear_2D(model, x_range, y_range): """ Discretize model by performing a bilinear interpolation. 
""" # Evaluate model 0.5 pixel outside the boundaries x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) x, y = np.meshgrid(x, y) values_intermediate_grid = model(x, y) # Mean in y direction values = 0.5 * (values_intermediate_grid[1:, :] + values_intermediate_grid[:-1, :]) # Mean in x direction values = 0.5 * (values[:, 1:] + values[:, :-1]) return values def discretize_oversample_1D(model, x_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace( x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor), ) values = model(x) # Reshape and compute mean values = np.reshape(values, (x.size // factor, factor)) return values.mean(axis=1) def discretize_oversample_2D(model, x_range, y_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace( x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor), ) y = np.linspace( y_range[0] - 0.5 * (1 - 1 / factor), y_range[1] - 0.5 * (1 + 1 / factor), num=int((y_range[1] - y_range[0]) * factor), ) x_grid, y_grid = np.meshgrid(x, y) values = model(x_grid, y_grid) # Reshape and compute mean shape = (y.size // factor, factor, x.size // factor, factor) values = np.reshape(values, shape) return values.mean(axis=3).mean(axis=1) def discretize_integrate_1D(model, x_range): """ Discretize model by integrating numerically the model over the bin. """ from scipy.integrate import quad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) values = np.array([]) # Integrate over all bins for i in range(x.size - 1): values = np.append(values, quad(model, x[i], x[i + 1])[0]) return values def discretize_integrate_2D(model, x_range, y_range): """ Discretize model by integrating the model over the pixel. """ from scipy.integrate import dblquad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) values = np.empty((y.size - 1, x.size - 1)) # Integrate over all pixels for i in range(x.size - 1): for j in range(y.size - 1): values[j, i] = dblquad( func=lambda y, x: model(x, y), a=x[i], b=x[i + 1], gfun=lambda x: y[j], hfun=lambda x: y[j + 1], )[0] return values
7fd773c1f14cc6a1288df104d6484a408d2ed024613674fa4fa2561ab12e6ae7
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings from functools import partial import numpy as np from astropy import units as u from astropy.modeling.convolution import Convolution from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel from astropy.nddata import support_nddata from astropy.utils.console import human_file_size from astropy.utils.exceptions import AstropyUserWarning from ._convolve import _convolveNd_c from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception # np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)]) # fmt: off _good_sizes = np.array( [ 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000, ] ) # fmt: on _good_range = int(np.log10(_good_sizes[-1])) # Disabling doctests when scipy isn't present. __doctest_requires__ = {("convolve_fft",): ["scipy.fft"]} BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"] def _next_fast_lengths(shape): """ Find optimal or good sizes to pad an array of ``shape`` to for better performance with `numpy.fft.*fft` and `scipy.fft.*fft`. Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise looked up from list and scaled by powers of 10, if necessary. """ try: import scipy.fft return np.array([scipy.fft.next_fast_len(j) for j in shape]) except ImportError: pass newshape = np.empty(len(np.atleast_1d(shape)), dtype=int) for i, j in enumerate(shape): scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0) for n in _good_sizes: if n * scale >= j: newshape[i] = n * scale break else: raise ValueError( f"No next fast length for {j} found in list of _good_sizes " f"<= {_good_sizes[-1] * scale}." ) return newshape def _copy_input_if_needed( input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None ): # Alias input input = input.array if isinstance(input, Kernel) else input # strip quantity attributes if hasattr(input, "unit"): input = input.value output = input # Copy input try: # Anything that's masked must be turned into NaNs for the interpolation. # This requires copying. A copy is also needed for nan_treatment == 'fill' # A copy prevents possible function side-effects of the input array. if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None: if np.ma.is_masked(input): # ``np.ma.maskedarray.filled()`` returns a copy, however there # is no way to specify the return type or order etc. In addition # ``np.nan`` is a ``float`` and there is no conversion to an # ``int`` type. Therefore, a pre-fill copy is needed for non # ``float`` masked arrays. 
``subok=True`` is needed to retain # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill # to act as the copy if type and order are already correct. output = np.array( input, dtype=dtype, copy=False, order=order, subok=True ) output = output.filled(fill_value) else: # Since we're making a copy, we might as well use `subok=False` to save, # what is probably, a negligible amount of memory. output = np.array( input, dtype=dtype, copy=True, order=order, subok=False ) if mask is not None: # mask != 0 yields a bool mask for all ints/floats/bool output[mask != 0] = fill_value else: # The call below is synonymous with np.asanyarray(array, ftype=float, order='C') # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. # If it is and `subok=False` (default), then it will copy even if `copy=False`. This # uses less memory when ndarray subclasses are passed in. output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) except (TypeError, ValueError) as e: raise TypeError( "input should be a Numpy array or something convertible into a float array", e, ) return output @support_nddata(data="array") def convolve( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, mask=None, preserve_nan=False, normalization_zero_tol=1e-8, ): """ Convolve an array with a kernel. This routine differs from `scipy.ndimage.convolve` because it includes a special treatment for ``NaN`` values. Rather than including ``NaN`` values in the array in the convolution calculation, which causes large ``NaN`` holes in the convolved array, ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Parameters ---------- array : `~astropy.nddata.NDData` or array-like The array to convolve. This should be a 1, 2, or 3-dimensional array or a list or a set of nested lists representing a 1, 2, or 3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of the `~astropy.nddata.NDData` will be used as the ``mask`` argument. kernel : `numpy.ndarray` or `~astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array, and the dimensions should be odd in all directions. If a masked array, the masked values will be replaced by ``fill_value``. boundary : str, optional A flag indicating how to handle boundaries: * `None` Set the ``result`` values to zero where the kernel extends beyond the edge of the array. * 'fill' Set values outside the array boundary to ``fill_value`` (default). * 'wrap' Periodic boundary that wrap to the other side of ``array``. * 'extend' Set values outside the array to the nearest ``array`` value. fill_value : float, optional The value to use outside the array when using ``boundary='fill'``. normalize_kernel : bool, optional Whether to normalize the kernel to have a sum of one. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. 
Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked of it is masked in either ``mask`` *or* ``array.mask``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". Returns ------- result : `numpy.ndarray` An array with the same dimensions and as the input array, convolved with kernel. The data type depends on the input array type. If array is a floating point type, then the return array keeps the same data type, otherwise the type is ``numpy.float``. Notes ----- For masked arrays, masked values are treated as NaNs. The convolution is always done at ``numpy.float`` precision. """ if boundary not in BOUNDARY_OPTIONS: raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}") if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # OpenMP support is disabled at the C src code level, changing this will have # no effect. n_threads = 1 # Keep refs to originals passed_kernel = kernel passed_array = array # The C routines all need float type inputs (so, a particular # bit size, endianness, etc.). So we have to convert, which also # has the effect of making copies so we don't modify the inputs. # After this, the variables we work with will be array_internal, and # kernel_internal. However -- we do want to keep track of what type # the input array was so we can cast the result to that at the end # if it's a floating point type. Don't bother with this for lists -- # just always push those as float. # It is always necessary to make a copy of kernel (since it is modified), # but, if we just so happen to be lucky enough to have the input array # have exactly the desired type, we just alias to array_internal # Convert kernel to ndarray if not already # Copy or alias array to array_internal array_internal = _copy_input_if_needed( passed_array, dtype=float, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) array_dtype = getattr(passed_array, "dtype", array_internal.dtype) # Copy or alias kernel to kernel_internal kernel_internal = _copy_input_if_needed( passed_kernel, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=fill_value, ) # Make sure kernel has all odd axes if has_even_axis(kernel_internal): raise_even_kernel_exception() # If both image array and kernel are Kernel instances # constrain convolution method # This must occur before the main alias/copy of ``passed_kernel`` to # ``kernel_internal`` as it is used for filling masked kernels. if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel): warnings.warn( "Both array and kernel are Kernel instances, hardwiring " "the following parameters: boundary='fill', fill_value=0," " normalize_Kernel=True, nan_treatment='interpolate'", AstropyUserWarning, ) boundary = "fill" fill_value = 0 normalize_kernel = True nan_treatment = "interpolate" # ----------------------------------------------------------------------- # From this point onwards refer only to ``array_internal`` and # ``kernel_internal``. # Assume both are base np.ndarrays and NOT subclasses e.g. NOT # ``Kernel`` nor ``np.ma.maskedarray`` classes. 
# ----------------------------------------------------------------------- # Check dimensionality if array_internal.ndim == 0: raise Exception("cannot convolve 0-dimensional arrays") elif array_internal.ndim > 3: raise NotImplementedError( "convolve only supports 1, 2, and 3-dimensional arrays at this time" ) elif array_internal.ndim != kernel_internal.ndim: raise Exception("array and kernel have differing number of dimensions.") array_shape = np.array(array_internal.shape) kernel_shape = np.array(kernel_internal.shape) pad_width = kernel_shape // 2 # For boundary=None only the center space is convolved. All array indices within a # distance kernel.shape//2 from the edge are completely ignored (zeroed). # E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2 # are convolved. It is therefore not possible to use this method to convolve an # array by a kernel that is larger (see note below) than the array - as ALL pixels # would be ignored leaving an array of only zeros. # Note: For even kernels the correctness condition is array_shape > kernel_shape. # For odd kernels it is: # array_shape >= kernel_shape OR # array_shape > kernel_shape-1 OR # array_shape > 2*(kernel_shape//2). # Since the latter is equal to the former two for even lengths, the latter condition is # complete. if boundary is None and not np.all(array_shape > 2 * pad_width): raise KernelSizeError( "for boundary=None all kernel axes must be smaller than array's - " "use boundary in ['fill', 'extend', 'wrap'] instead." ) # NaN interpolation significantly slows down the C convolution # computation. Since nan_treatment = 'interpolate', is the default # check whether it is even needed, if not, don't interpolate. # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any() nan_interpolate = (nan_treatment == "interpolate") and np.isnan( array_internal.sum() ) # Check if kernel is normalizable if normalize_kernel or nan_interpolate: kernel_sum = kernel_internal.sum() kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero: if nan_interpolate: raise ValueError( "Setting nan_treatment='interpolate' " "requires the kernel to be normalized, " "but the input kernel has a sum close " "to zero. For a zero-sum kernel and " "data with NaNs, set nan_treatment='fill'." ) else: raise ValueError( "The kernel can't be normalized, because " "its sum is close to zero. The sum of the " f"given kernel is < {1.0 / MAX_NORMALIZATION}" ) # Mark the NaN values so we can replace them later if interpolate_nan is # not set if preserve_nan or nan_treatment == "fill": initially_nan = np.isnan(array_internal) if nan_treatment == "fill": array_internal[initially_nan] = fill_value # Avoid any memory allocation within the C code. Allocate output array # here and pass through instead. result = np.zeros(array_internal.shape, dtype=float, order="C") embed_result_within_padded_region = True array_to_convolve = array_internal if boundary in ("fill", "extend", "wrap"): embed_result_within_padded_region = False if boundary == "fill": # This method is faster than using numpy.pad(..., mode='constant') array_to_convolve = np.full( array_shape + 2 * pad_width, fill_value=fill_value, dtype=float, order="C", ) # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of # [pad_width[0]:-pad_width[0]] # to account for when the kernel has size of 1 making pad_width = 0. 
if array_internal.ndim == 1: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0] ] = array_internal elif array_internal.ndim == 2: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], ] = array_internal else: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], pad_width[2] : array_shape[2] + pad_width[2], ] = array_internal else: np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"} np_pad_mode = np_pad_mode_dict[boundary] pad_width = kernel_shape // 2 if array_internal.ndim == 1: np_pad_width = (pad_width[0],) elif array_internal.ndim == 2: np_pad_width = ((pad_width[0],), (pad_width[1],)) else: np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],)) array_to_convolve = np.pad( array_internal, pad_width=np_pad_width, mode=np_pad_mode ) _convolveNd_c( result, array_to_convolve, kernel_internal, nan_interpolate, embed_result_within_padded_region, n_threads, ) # So far, normalization has only occurred for nan_treatment == 'interpolate' # because this had to happen within the C extension so as to ignore # any NaNs if normalize_kernel: if not nan_interpolate: result /= kernel_sum elif nan_interpolate: result *= kernel_sum if nan_interpolate and not preserve_nan and np.isnan(result.sum()): warnings.warn( "nan_treatment='interpolate', however, NaN values detected " "post convolution. A contiguous region of NaN values, larger " "than the kernel size, are present in the input array. " "Increase the kernel size to avoid this.", AstropyUserWarning, ) if preserve_nan: result[initially_nan] = np.nan # Convert result to original data type array_unit = getattr(passed_array, "unit", None) if array_unit is not None: result <<= array_unit if isinstance(passed_array, Kernel): if isinstance(passed_array, Kernel1D): new_result = Kernel1D(array=result) elif isinstance(passed_array, Kernel2D): new_result = Kernel2D(array=result) else: raise TypeError("Only 1D and 2D Kernels are supported.") new_result._is_bool = False new_result._separable = passed_array._separable if isinstance(passed_kernel, Kernel): new_result._separable = new_result._separable and passed_kernel._separable return new_result elif array_dtype.kind == "f": # Try to preserve the input type if it's a floating point type # Avoid making another copy if possible try: return result.astype(array_dtype, copy=False) except TypeError: return result.astype(array_dtype) else: return result @support_nddata(data="array") def convolve_fft( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, normalization_zero_tol=1e-8, preserve_nan=False, mask=None, crop=True, return_fft=False, fft_pad=None, psf_pad=None, min_wt=0.0, allow_huge=False, fftn=np.fft.fftn, ifftn=np.fft.ifftn, complex_dtype=complex, dealias=False, ): """ Convolve an ndarray with an nd-kernel. Returns a convolved image with ``shape = array.shape``. Assumes kernel is centered. `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` values in the original image with interpolated values using the kernel as an interpolation function. However, it also includes many additional options specific to the implementation. `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways: * It can treat ``NaN`` values as zeros or interpolate over them. * ``inf`` values are treated as ``NaN`` * It optionally pads to the nearest faster sizes to improve FFT speed. 
These sizes are optimized for the numpy and scipy implementations, and ``fftconvolve`` uses them by default as well; when using other external functions (see below), results may vary. * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned) * It lets you use your own fft, e.g., `pyFFTW <https://pypi.org/project/pyFFTW/>`_ or `pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to performance improvements, depending on your system configuration. pyFFTW3 is threaded, and therefore may yield significant performance benefits on multi-core machines at the cost of greater memory requirements. Specify the ``fftn`` and ``ifftn`` keywords to override the default, which is `numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also offer somewhat better performance and a multi-threaded option. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric) boundary : {'fill', 'wrap'}, optional A flag indicating how to handle boundaries: * 'fill': set values outside the array boundary to fill_value (default) * 'wrap': periodic boundary The `None` and 'extend' parameters are not supported for FFT-based convolution. fill_value : float, optional The value to use outside the array when using boundary='fill'. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. normalize_kernel : callable or boolean, optional If specified, this is the function to divide kernel by to normalize it. e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be: ``kernel = kernel / np.sum(kernel)``. If True, defaults to ``normalize_kernel = np.sum``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked of it is masked in either ``mask`` *or* ``array.mask``. crop : bool, optional Default on. Return an image of the size of the larger of the input image and the kernel. If the image and kernel are asymmetric in opposite directions, will return the largest image in both directions. For example, if an input image has shape [100,3] but a kernel with shape [6,6] is used, the output will be [100,6]. 
return_fft : bool, optional Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. fft_pad : bool, optional Default on. Zero-pad image to the nearest size supporting more efficient execution of the FFT, generally values factorizable into the first 3-5 prime numbers. With ``boundary='wrap'``, this will be disabled. psf_pad : bool, optional Zero-pad image to be at least the sum of the image sizes to avoid edge-wrapping when smoothing. This is enabled by default with ``boundary='fill'``, but it can be overridden with a boolean option. ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. min_wt : float, optional If ignoring ``NaN`` / zeros, force all grid points with a weight less than this value to ``NaN`` (the weight of a grid point with *no* ignored neighbors is 1.0). If ``min_wt`` is zero, then all zero-weight points will be set to zero instead of ``NaN`` (which they would be otherwise, because 1/0 = nan). See the examples below. allow_huge : bool, optional Allow huge arrays in the FFT? If False, will raise an exception if the array or kernel size is >1 GB. fftn : callable, optional The fft function. Can be overridden to use your own ffts, e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``. ifftn : callable, optional The inverse fft function. Can be overridden the same way ``fttn``. complex_dtype : complex type, optional Which complex dtype to use. `numpy` has a range of options, from 64 to 256. dealias: bool, optional Default off. Zero-pad image to enable explicit dealiasing of convolution. With ``boundary='wrap'``, this will be disabled. Note that for an input of nd dimensions this will increase the size of the temporary arrays by at least ``1.5**nd``. This may result in significantly more memory usage. Returns ------- default : ndarray ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns ``fft(array) * fft(kernel)``. If crop is not set, returns the image, but with the fft-padded size instead of the input size. Raises ------ `ValueError` If the array is bigger than 1 GB after padding, will raise this exception unless ``allow_huge`` is True. See Also -------- convolve: Convolve is a non-fft version of this code. It is more memory efficient and for small kernels can be faster. Notes ----- With ``psf_pad=True`` and a large PSF, the resulting data can become large and consume a lot of memory. See Issue https://github.com/astropy/astropy/pull/4366 and the update in https://github.com/astropy/astropy/pull/11533 for further details. Dealiasing of pseudospectral convolutions is necessary for numerical stability of the underlying algorithms. A common method for handling this is to zero pad the image by at least 1/2 to eliminate the wavenumbers which have been aliased by convolution. This is so that the aliased 1/3 of the results of the convolution computation can be thrown out. See https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2 https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037 Note that if dealiasing is necessary to your application, but your process is memory constrained, you may want to consider using FFTW++: https://github.com/dealias/fftwpp. It includes python wrappers for a pseudospectral convolution which will implicitly dealias your convolution without the need for additional padding. Note that one cannot use FFTW++'s convlution directly in this method as in handles the entire convolution process internally. 
Additionally, FFTW++ includes other useful pseudospectral methods to consider. Examples -------- >>> convolve_fft([1, 0, 3], [1, 1, 1]) array([0.33333333, 1.33333333, 1. ]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) array([0.5, 2. , 1.5]) >>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00]) >>> convolve_fft([1, 2, 3], [1]) array([1., 2., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') array([1., 0., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', ... min_wt=1e-8) array([ 1., nan, 3.]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') array([0.5, 2. , 1.5]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True) array([0.5, 2. , 1.5]) >>> import scipy.fft # optional - requires scipy >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, ... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn) array([0.5, 2. , 1.5]) >>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores >>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp) array([0.5, 2. , 1.5]) """ # Checking copied from convolve.py - however, since FFTs have real & # complex components, we change the types. Only the real part will be # returned! Note that this always makes a copy. # Check kernel is kernel instance if isinstance(kernel, Kernel): kernel = kernel.array if isinstance(array, Kernel): raise TypeError( "Can't convolve two kernels with convolve_fft. Use convolve instead." ) if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # Get array quantity if it exists array_unit = getattr(array, "unit", None) # Convert array dtype to complex # and ensure that list inputs become arrays array = _copy_input_if_needed( array, dtype=complex, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) kernel = _copy_input_if_needed( kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0 ) # Check that the number of dimensions is compatible if array.ndim != kernel.ndim: raise ValueError("Image and kernel must have same number of dimensions") arrayshape = array.shape kernshape = kernel.shape array_size_B = ( np.product(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_B > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_B)}. " "Use allow_huge=True to override this exception." ) # NaN and inf catching nanmaskarray = np.isnan(array) | np.isinf(array) if nan_treatment == "fill": array[nanmaskarray] = fill_value else: array[nanmaskarray] = 0 nanmaskkernel = np.isnan(kernel) | np.isinf(kernel) kernel[nanmaskkernel] = 0 if normalize_kernel is True: if kernel.sum() < 1.0 / MAX_NORMALIZATION: raise Exception( "The kernel can't be normalized, because its sum is close to zero. The" f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}" ) kernel_scale = kernel.sum() normalized_kernel = kernel / kernel_scale kernel_scale = 1 # if we want to normalize it, leave it normed! elif normalize_kernel: # try this. If a function is not passed, the code will just crash... I # think type checking would be better but PEPs say otherwise... 
kernel_scale = normalize_kernel(kernel) normalized_kernel = kernel / kernel_scale else: kernel_scale = kernel.sum() if np.abs(kernel_scale) < normalization_zero_tol: if nan_treatment == "interpolate": raise ValueError( "Cannot interpolate NaNs with an unnormalizable kernel" ) else: # the kernel's sum is near-zero, so it can't be scaled kernel_scale = 1 normalized_kernel = kernel else: # the kernel is normalizable; we'll temporarily normalize it # now and undo the normalization later. normalized_kernel = kernel / kernel_scale if boundary is None: warnings.warn( "The convolve_fft version of boundary=None is " "equivalent to the convolve boundary='fill'. There is " "no FFT equivalent to convolve's " "zero-if-kernel-leaves-boundary", AstropyUserWarning, ) if psf_pad is None: psf_pad = True if fft_pad is None: fft_pad = True elif boundary == "fill": # create a boundary region at least as large as the kernel if psf_pad is False: warnings.warn( f"psf_pad was set to {psf_pad}, which overrides the " "boundary='fill' setting.", AstropyUserWarning, ) else: psf_pad = True if fft_pad is None: # default is 'True' according to the docstring fft_pad = True elif boundary == "wrap": if psf_pad: raise ValueError("With boundary='wrap', psf_pad cannot be enabled.") psf_pad = False if fft_pad: raise ValueError("With boundary='wrap', fft_pad cannot be enabled.") fft_pad = False if dealias: raise ValueError("With boundary='wrap', dealias cannot be enabled.") fill_value = 0 # force zero; it should not be used elif boundary == "extend": raise NotImplementedError( "The 'extend' option is not implemented for fft-based convolution" ) # Add shapes elementwise for psf_pad. if psf_pad: # default=False # add the sizes along each dimension (bigger) newshape = np.array(arrayshape) + np.array(kernshape) else: # take the larger shape in each dimension (smaller) newshape = np.maximum(arrayshape, kernshape) if dealias: # Extend shape by 1/2 for dealiasing newshape += np.ceil(newshape / 2).astype(int) # Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5). if fft_pad: # default=True # Get optimized sizes from scipy. newshape = _next_fast_lengths(newshape) # perform a second check after padding array_size_C = ( np.product(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_C > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_C)}. " "Use allow_huge=True to override this exception." ) # For future reference, this can be used to predict "almost exactly" # how much *additional* memory will be used. # size * (array + kernel + kernelfft + arrayfft + # (kernel*array)fft + # optional(weight image + weight_fft + weight_ifft) + # optional(returned_fft)) # total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize # * (5 + 3*((interpolate_nan or ) and kernel_is_normalized)) # + (1 + (not return_fft)) * # np.product(arrayshape)*np.dtype(complex_dtype).itemsize # + np.product(arrayshape)*np.dtype(bool).itemsize # + np.product(kernshape)*np.dtype(bool).itemsize) # ) / 1024.**3 # separate each dimension by the padding size... 
this is to determine the # appropriate slice size to get back to the input dimensions arrayslices = [] kernslices = [] for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape): center = newdimsize - (newdimsize + 1) // 2 arrayslices += [ slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2) ] kernslices += [ slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2) ] arrayslices = tuple(arrayslices) kernslices = tuple(kernslices) if not np.all(newshape == arrayshape): if np.isfinite(fill_value): bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value else: bigarray = np.zeros(newshape, dtype=complex_dtype) bigarray[arrayslices] = array else: bigarray = array if not np.all(newshape == kernshape): bigkernel = np.zeros(newshape, dtype=complex_dtype) bigkernel[kernslices] = normalized_kernel else: bigkernel = normalized_kernel arrayfft = fftn(bigarray) # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity kernfft = fftn(np.fft.ifftshift(bigkernel)) fftmult = arrayfft * kernfft interpolate_nan = nan_treatment == "interpolate" if interpolate_nan: if not np.isfinite(fill_value): bigimwt = np.zeros(newshape, dtype=complex_dtype) else: bigimwt = np.ones(newshape, dtype=complex_dtype) bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan wtfft = fftn(bigimwt) # You can only get to this point if kernel_is_normalized wtfftmult = wtfft * kernfft wtsm = ifftn(wtfftmult) # need to re-zero weights outside of the image (if it is padded, we # still don't weight those regions) bigimwt[arrayslices] = wtsm.real[arrayslices] else: bigimwt = 1 if np.isnan(fftmult).any(): # this check should be unnecessary; call it an insanity check raise ValueError("Encountered NaNs in convolve. This is disallowed.") fftmult *= kernel_scale if array_unit is not None: fftmult <<= array_unit if return_fft: return fftmult if interpolate_nan: with np.errstate(divide="ignore", invalid="ignore"): # divide by zeros are expected here; if the weight is zero, we want # the output to be nan or inf rifft = (ifftn(fftmult)) / bigimwt if not np.isscalar(bigimwt): if min_wt > 0.0: rifft[bigimwt < min_wt] = np.nan else: # Set anything with no weight to zero (taking into account # slight offsets due to floating-point errors). rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0 else: rifft = ifftn(fftmult) if preserve_nan: rifft[arrayslices][nanmaskarray] = np.nan if crop: result = rifft[arrayslices].real return result else: return rifft.real def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs): """ Given a data set containing NaNs, replace the NaNs by interpolating from neighboring data points with a given kernel. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric). The kernel *must be normalizable* (i.e., its sum cannot be zero). convolve : `convolve` or `convolve_fft` One of the two convolution functions defined in this package. 
Returns ------- newarray : `numpy.ndarray` A copy of the original array with NaN pixels replaced with their interpolated counterparts """ if not np.any(np.isnan(array)): return array.copy() newarray = array.copy() convolved = convolve( array, kernel, nan_treatment="interpolate", normalize_kernel=True, preserve_nan=False, **kwargs, ) isnan = np.isnan(array) newarray[isnan] = convolved[isnan] return newarray def convolve_models(model, kernel, mode="convolve_fft", **kwargs): """ Convolve two models using `~astropy.convolution.convolve_fft`. Parameters ---------- model : `~astropy.modeling.core.Model` Functional model kernel : `~astropy.modeling.core.Model` Convolution kernel mode : str Keyword representing which function to use for convolution. * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function. * 'convolve' : use `~astropy.convolution.convolve`. **kwargs : dict Keyword arguments to me passed either to `~astropy.convolution.convolve` or `~astropy.convolution.convolve_fft` depending on ``mode``. Returns ------- default : `~astropy.modeling.core.CompoundModel` Convolved model """ if mode == "convolve_fft": operator = SPECIAL_OPERATORS.add( "convolve_fft", partial(convolve_fft, **kwargs) ) elif mode == "convolve": operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs)) else: raise ValueError(f"Mode {mode} is not supported.") return CompoundModel(operator, model, kernel) def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs): """ Convolve two models using `~astropy.convolution.convolve_fft`. Parameters ---------- model : `~astropy.modeling.core.Model` Functional model kernel : `~astropy.modeling.core.Model` Convolution kernel bounding_box : tuple The bounding box which encompasses enough of the support of both the ``model`` and ``kernel`` so that an accurate convolution can be computed. resolution : float The resolution that one wishes to approximate the convolution integral at. cache : optional, bool Default value True. Allow for the storage of the convolution computation for later reuse. **kwargs : dict Keyword arguments to be passed either to `~astropy.convolution.convolve` or `~astropy.convolution.convolve_fft` depending on ``mode``. Returns ------- default : `~astropy.modeling.core.CompoundModel` Convolved model """ operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs)) return Convolution(operator, model, kernel, bounding_box, resolution, cache)
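# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the module above. It only
# assumes the public ``astropy.convolution`` API defined in this file
# (``convolve``, ``interpolate_replace_nans``, ``Gaussian2DKernel``) and shows
# how ``interpolate_replace_nans`` wraps ``convolve`` with
# ``nan_treatment='interpolate'`` so that only NaN pixels are replaced.
if __name__ == "__main__":
    import numpy as np

    from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans

    image = np.ones((9, 9))
    image[4, 4] = np.nan  # a single bad pixel to be repaired

    kernel = Gaussian2DKernel(x_stddev=1)

    # Equivalent to convolve(image, kernel, nan_treatment='interpolate',
    # normalize_kernel=True), except that non-NaN pixels keep their
    # original values in the returned array.
    fixed = interpolate_replace_nans(image, kernel)
    assert not np.isnan(fixed).any()
    print(fixed[4, 4])  # close to 1.0, interpolated from the neighbours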
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains configuration and setup utilities for the Astropy project. """ from .configuration import * from .paths import *
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module contains classes and functions to standardize access to configuration files for Astropy and affiliated packages. .. note:: The configuration system makes use of the 'configobj' package, which stores configuration in a text format like that used in the standard library `ConfigParser`. More information and documentation for configobj can be found at https://configobj.readthedocs.io . """ import contextlib import importlib import io import os import pkgutil import warnings from contextlib import contextmanager, nullcontext from os import path from textwrap import TextWrapper from warnings import warn from astropy.extern.configobj import configobj, validate from astropy.utils import find_current_module, silence from astropy.utils.decorators import deprecated from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning from astropy.utils.introspection import resolve_name from .paths import get_config_dir __all__ = ( "InvalidConfigurationItemWarning", "ConfigurationMissingWarning", "get_config", "reload_config", "ConfigNamespace", "ConfigItem", "generate_config", "create_config_file", ) class InvalidConfigurationItemWarning(AstropyWarning): """A Warning that is issued when the configuration value specified in the astropy configuration file does not match the type expected for that configuration value. """ # This was raised with Astropy < 4.3 when the configuration file was not found. # It is kept for compatibility and should be removed at some point. @deprecated("5.0") class ConfigurationMissingWarning(AstropyWarning): """A Warning that is issued when the configuration directory cannot be accessed (usually due to a permissions problem). If this warning appears, configuration items will be set to their defaults rather than read from the configuration file, and no configuration will persist across sessions. """ # these are not in __all__ because it's not intended that a user ever see them class ConfigurationDefaultMissingError(ValueError): """An exception that is raised when the configuration defaults (which should be generated at build-time) are missing. """ # this is used in astropy/__init__.py class ConfigurationDefaultMissingWarning(AstropyWarning): """A warning that is issued when the configuration defaults (which should be generated at build-time) are missing. """ class ConfigurationChangedWarning(AstropyWarning): """ A warning that the configuration options have changed. """ class _ConfigNamespaceMeta(type): def __init__(cls, name, bases, dict): if cls.__bases__[0] is object: return for key, val in dict.items(): if isinstance(val, ConfigItem): val.name = key class ConfigNamespace(metaclass=_ConfigNamespaceMeta): """ A namespace of configuration items. Each subpackage with configuration items should define a subclass of this class, containing `ConfigItem` instances as members. 
For example:: class Conf(_config.ConfigNamespace): unicode_output = _config.ConfigItem( False, 'Use Unicode characters when outputting values, ...') use_color = _config.ConfigItem( sys.platform != 'win32', 'When True, use ANSI color escape sequences when ...', aliases=['astropy.utils.console.USE_COLOR']) conf = Conf() """ def __iter__(self): for key, val in self.__class__.__dict__.items(): if isinstance(val, ConfigItem): yield key keys = __iter__ """Iterate over configuration item names.""" def values(self): """Iterate over configuration item values.""" for val in self.__class__.__dict__.values(): if isinstance(val, ConfigItem): yield val def items(self): """Iterate over configuration item ``(name, value)`` pairs.""" for key, val in self.__class__.__dict__.items(): if isinstance(val, ConfigItem): yield key, val def set_temp(self, attr, value): """ Temporarily set a configuration value. Parameters ---------- attr : str Configuration item name value : object The value to set temporarily. Examples -------- >>> import astropy >>> with astropy.conf.set_temp('use_color', False): ... pass ... # console output will not contain color >>> # console output contains color again... """ if hasattr(self, attr): return self.__class__.__dict__[attr].set_temp(value) raise AttributeError(f"No configuration parameter '{attr}'") def reload(self, attr=None): """ Reload a configuration item from the configuration file. Parameters ---------- attr : str, optional The name of the configuration parameter to reload. If not provided, reload all configuration parameters. """ if attr is not None: if hasattr(self, attr): return self.__class__.__dict__[attr].reload() raise AttributeError(f"No configuration parameter '{attr}'") for item in self.values(): item.reload() def reset(self, attr=None): """ Reset a configuration item to its default. Parameters ---------- attr : str, optional The name of the configuration parameter to reload. If not provided, reset all configuration parameters. """ if attr is not None: if hasattr(self, attr): prop = self.__class__.__dict__[attr] prop.set(prop.defaultvalue) return raise AttributeError(f"No configuration parameter '{attr}'") for item in self.values(): item.set(item.defaultvalue) class ConfigItem: """ A setting and associated value stored in a configuration file. These objects should be created as members of `ConfigNamespace` subclasses, for example:: class _Conf(config.ConfigNamespace): unicode_output = config.ConfigItem( False, 'Use Unicode characters when outputting values, and writing widgets ' 'to the console.') conf = _Conf() Parameters ---------- defaultvalue : object, optional The default value for this item. If this is a list of strings, this item will be interpreted as an 'options' value - this item must be one of those values, and the first in the list will be taken as the default value. description : str or None, optional A description of this item (will be shown as a comment in the configuration file) cfgtype : str or None, optional A type specifier like those used as the *values* of a particular key in a ``configspec`` file of ``configobj``. If None, the type will be inferred from the default value. module : str or None, optional The full module name that this item is associated with. The first element (e.g. 'astropy' if this is 'astropy.config.configuration') will be used to determine the name of the configuration file, while the remaining items determine the section. 
If None, the package will be inferred from the package within which this object's initializer is called. aliases : str, or list of str, optional The deprecated location(s) of this configuration item. If the config item is not found at the new location, it will be searched for at all of the old locations. Raises ------ RuntimeError If ``module`` is `None`, but the module this item is created from cannot be determined. """ # this is used to make validation faster so a Validator object doesn't # have to be created every time _validator = validate.Validator() cfgtype = None """ A type specifier like those used as the *values* of a particular key in a ``configspec`` file of ``configobj``. """ rootname = "astropy" """ Rootname sets the base path for all config files. """ def __init__( self, defaultvalue="", description=None, cfgtype=None, module=None, aliases=None ): from astropy.utils import isiterable if module is None: module = find_current_module(2) if module is None: msg1 = "Cannot automatically determine get_config module, " msg2 = "because it is not called from inside a valid module" raise RuntimeError(msg1 + msg2) else: module = module.__name__ self.module = module self.description = description self.__doc__ = description # now determine cfgtype if it is not given if cfgtype is None: if isiterable(defaultvalue) and not isinstance(defaultvalue, str): # it is an options list dvstr = [str(v) for v in defaultvalue] cfgtype = "option(" + ", ".join(dvstr) + ")" defaultvalue = dvstr[0] elif isinstance(defaultvalue, bool): cfgtype = "boolean" elif isinstance(defaultvalue, int): cfgtype = "integer" elif isinstance(defaultvalue, float): cfgtype = "float" elif isinstance(defaultvalue, str): cfgtype = "string" defaultvalue = str(defaultvalue) self.cfgtype = cfgtype self._validate_val(defaultvalue) self.defaultvalue = defaultvalue if aliases is None: self.aliases = [] elif isinstance(aliases, str): self.aliases = [aliases] else: self.aliases = aliases def __set__(self, obj, value): return self.set(value) def __get__(self, obj, objtype=None): if obj is None: return self return self() def set(self, value): """ Sets the current value of this ``ConfigItem``. This also updates the comments that give the description and type information. Parameters ---------- value The value this item should be set to. Raises ------ TypeError If the provided ``value`` is not valid for this ``ConfigItem``. """ try: value = self._validate_val(value) except validate.ValidateError as e: raise TypeError( f"Provided value for configuration item {self.name} not valid:" f" {e.args[0]}" ) sec = get_config(self.module, rootname=self.rootname) sec[self.name] = value @contextmanager def set_temp(self, value): """ Sets this item to a specified value only inside a with block. Use as:: ITEM = ConfigItem('ITEM', 'default', 'description') with ITEM.set_temp('newval'): #... do something that wants ITEM's value to be 'newval' ... print(ITEM) # ITEM is now 'default' after the with block Parameters ---------- value The value to set this item to inside the with block. """ initval = self() self.set(value) try: yield finally: self.set(initval) def reload(self): """Reloads the value of this ``ConfigItem`` from the relevant configuration file. Returns ------- val : object The new value loaded from the configuration file. 
""" self.set(self.defaultvalue) baseobj = get_config(self.module, True, rootname=self.rootname) secname = baseobj.name cobj = baseobj # a ConfigObj's parent is itself, so we look for the parent with that while cobj.parent is not cobj: cobj = cobj.parent newobj = configobj.ConfigObj(cobj.filename, interpolation=False) if secname is not None: if secname not in newobj: return baseobj.get(self.name) newobj = newobj[secname] if self.name in newobj: baseobj[self.name] = newobj[self.name] return baseobj.get(self.name) def __repr__(self): return ( f"<{self.__class__.__name__}: name={self.name!r} value={self()!r} at" f" 0x{id(self):x}>" ) def __str__(self): return "\n".join( ( f"{self.__class__.__name__}: {self.name}", f" cfgtype={self.cfgtype!r}", f" defaultvalue={self.defaultvalue!r}", f" description={self.description!r}", f" module={self.module}", f" value={self()!r}", ) ) def __call__(self): """Returns the value of this ``ConfigItem`` Returns ------- val : object This item's value, with a type determined by the ``cfgtype`` attribute. Raises ------ TypeError If the configuration value as stored is not this item's type. """ def section_name(section): if section == "": return "at the top-level" else: return f"in section [{section}]" options = [] sec = get_config(self.module, rootname=self.rootname) if self.name in sec: options.append((sec[self.name], self.module, self.name)) for alias in self.aliases: module, name = alias.rsplit(".", 1) sec = get_config(module, rootname=self.rootname) if "." in module: filename, module = module.split(".", 1) else: filename = module module = "" if name in sec: if "." in self.module: new_module = self.module.split(".", 1)[1] else: new_module = "" warn( f"Config parameter '{name}' {section_name(module)} of the file" f" '{get_config_filename(filename, rootname=self.rootname)}' is" f" deprecated. Use '{self.name}'" f" {section_name(new_module)} instead.", AstropyDeprecationWarning, ) options.append((sec[name], module, name)) if len(options) == 0: self.set(self.defaultvalue) options.append((self.defaultvalue, None, None)) if len(options) > 1: filename, sec = self.module.split(".", 1) warn( f"Config parameter '{self.name}' {section_name(sec)} of the file" f" '{get_config_filename(filename, rootname=self.rootname)}' is given" " by more than one alias" f" ({', '.join(['.'.join(x[1:3]) for x in options if x[1] is not None])})." " Using the first.", AstropyDeprecationWarning, ) val = options[0][0] try: return self._validate_val(val) except validate.ValidateError as e: raise TypeError(f"Configuration value not valid: {e.args[0]}") def _validate_val(self, val): """Validates the provided value based on cfgtype and returns the type-cast value throws the underlying configobj exception if it fails """ # note that this will normally use the *class* attribute `_validator`, # but if some arcane reason is needed for making a special one for an # instance or sub-class, it will be used return self._validator.check(self.cfgtype, val) # this dictionary stores the primary copy of the ConfigObj's for each # root package _cfgobjs = {} def get_config_filename(packageormod=None, rootname=None): """ Get the filename of the config file associated with the given package or module. """ cfg = get_config(packageormod, rootname=rootname) while cfg.parent is not cfg: cfg = cfg.parent return cfg.filename # This is used by testing to override the config file, so we can test # with various config files that exercise different features of the # config system. 
_override_config_file = None def get_config(packageormod=None, reload=False, rootname=None): """Gets the configuration object or section associated with a particular package or module. Parameters ---------- packageormod : str or None The package for which to retrieve the configuration object. If a string, it must be a valid package name, or if ``None``, the package from which this function is called will be used. reload : bool, optional Reload the file, even if we have it cached. rootname : str or None Name of the root configuration directory. If ``None`` and ``packageormod`` is ``None``, this defaults to be the name of the package from which this function is called. If ``None`` and ``packageormod`` is not ``None``, this defaults to ``astropy``. Returns ------- cfgobj : ``configobj.ConfigObj`` or ``configobj.Section`` If the requested package is a base package, this will be the ``configobj.ConfigObj`` for that package, or if it is a subpackage or module, it will return the relevant ``configobj.Section`` object. Raises ------ RuntimeError If ``packageormod`` is `None`, but the package this item is created from cannot be determined. """ if packageormod is None: packageormod = find_current_module(2) if packageormod is None: msg1 = "Cannot automatically determine get_config module, " msg2 = "because it is not called from inside a valid module" raise RuntimeError(msg1 + msg2) else: packageormod = packageormod.__name__ _autopkg = True else: _autopkg = False packageormodspl = packageormod.split(".") pkgname = packageormodspl[0] secname = ".".join(packageormodspl[1:]) if rootname is None: if _autopkg: rootname = pkgname else: rootname = "astropy" # so we don't break affiliated packages cobj = _cfgobjs.get(pkgname, None) if cobj is None or reload: cfgfn = None try: # This feature is intended only for use by the unit tests if _override_config_file is not None: cfgfn = _override_config_file else: cfgfn = path.join(get_config_dir(rootname=rootname), pkgname + ".cfg") cobj = configobj.ConfigObj(cfgfn, interpolation=False) except OSError: # This can happen when HOME is not set cobj = configobj.ConfigObj(interpolation=False) # This caches the object, so if the file becomes accessible, this # function won't see it unless the module is reloaded _cfgobjs[pkgname] = cobj if secname: # not the root package if secname not in cobj: cobj[secname] = {} return cobj[secname] else: return cobj def generate_config(pkgname="astropy", filename=None, verbose=False): """Generates a configuration file, from the list of `ConfigItem` objects for each subpackage. .. versionadded:: 4.1 Parameters ---------- pkgname : str or None The package for which to retrieve the configuration object. filename : str or file-like or None If None, the default configuration path is taken from `get_config`. """ if verbose: verbosity = nullcontext filter_warnings = AstropyDeprecationWarning else: verbosity = silence filter_warnings = Warning package = importlib.import_module(pkgname) with verbosity(), warnings.catch_warnings(): warnings.simplefilter("ignore", category=filter_warnings) for mod in pkgutil.walk_packages( path=package.__path__, prefix=package.__name__ + "." 
): if mod.module_finder.path.endswith(("test", "tests")) or mod.name.endswith( "setup_package" ): # Skip test and setup_package modules continue if mod.name.split(".")[-1].startswith("_"): # Skip private modules continue with contextlib.suppress(ImportError): importlib.import_module(mod.name) wrapper = TextWrapper(initial_indent="## ", subsequent_indent="## ", width=78) if filename is None: filename = get_config_filename(pkgname) with contextlib.ExitStack() as stack: if isinstance(filename, (str, os.PathLike)): fp = stack.enter_context(open(filename, "w")) else: # assume it's a file object, or io.StringIO fp = filename # Parse the subclasses, ordered by their module name subclasses = ConfigNamespace.__subclasses__() processed = set() for conf in sorted(subclasses, key=lambda x: x.__module__): mod = conf.__module__ # Skip modules for other packages, e.g. astropy modules that # would be imported when running the function for astroquery. if mod.split(".")[0] != pkgname: continue # Check that modules are not processed twice, which can happen # when they are imported in another module. if mod in processed: continue else: processed.add(mod) print_module = True for item in conf().values(): if print_module: # If this is the first item of the module, we print the # module name, but not if this is the root package... if item.module != pkgname: modname = item.module.replace(f"{pkgname}.", "") fp.write(f"[{modname}]\n\n") print_module = False fp.write(wrapper.fill(item.description) + "\n") if isinstance(item.defaultvalue, (tuple, list)): if len(item.defaultvalue) == 0: fp.write(f"# {item.name} = ,\n\n") elif len(item.defaultvalue) == 1: fp.write(f"# {item.name} = {item.defaultvalue[0]},\n\n") else: fp.write( f"# {item.name} =" f' {",".join(map(str, item.defaultvalue))}\n\n' ) else: fp.write(f"# {item.name} = {item.defaultvalue}\n\n") def reload_config(packageormod=None, rootname=None): """Reloads configuration settings from a configuration file for the root package of the requested package/module. This overwrites any changes that may have been made in `ConfigItem` objects. This applies for any items that are based on this file, which is determined by the *root* package of ``packageormod`` (e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'`` module). Parameters ---------- packageormod : str or None The package or module name - see `get_config` for details. rootname : str or None Name of the root configuration directory - see `get_config` for details. """ sec = get_config(packageormod, True, rootname=rootname) # look for the section that is its own parent - that's the base object while sec.parent is not sec: sec = sec.parent sec.reload() def is_unedited_config_file(content, template_content=None): """ Determines if a config file can be safely replaced because it doesn't actually contain any meaningful content, i.e. if it contains only comments or is completely empty. """ buffer = io.StringIO(content) raw_cfg = configobj.ConfigObj(buffer, interpolation=True) # If any of the items is set, return False return not any(len(v) > 0 for v in raw_cfg.values()) # This function is no more used by astropy but it is kept for the other # packages that may use it (e.g. astroquery). It should be removed at some # point. 
# this is not in __all__ because it's not intended that a user uses it @deprecated("5.0") def update_default_config(pkg, default_cfg_dir_or_fn, version=None, rootname="astropy"): """ Checks if the configuration file for the specified package exists, and if not, copy over the default configuration. If the configuration file looks like it has already been edited, we do not write over it, but instead write a file alongside it named ``pkg.version.cfg`` as a "template" for the user. Parameters ---------- pkg : str The package to be updated. default_cfg_dir_or_fn : str The filename or directory name where the default configuration file is. If a directory name, ``'pkg.cfg'`` will be used in that directory. version : str, optional The current version of the given package. If not provided, it will be obtained from ``pkg.__version__``. rootname : str Name of the root configuration directory. Returns ------- updated : bool If the profile was updated, `True`, otherwise `False`. Raises ------ AttributeError If the version number of the package could not determined. """ if path.isdir(default_cfg_dir_or_fn): default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + ".cfg") else: default_cfgfn = default_cfg_dir_or_fn if not path.isfile(default_cfgfn): # There is no template configuration file, which basically # means the affiliated package is not using the configuration # system, so just return. return False cfgfn = get_config(pkg, rootname=rootname).filename with open(default_cfgfn, encoding="latin-1") as fr: template_content = fr.read() doupdate = False if cfgfn is not None: if path.exists(cfgfn): with open(cfgfn, encoding="latin-1") as fd: content = fd.read() identical = content == template_content if not identical: doupdate = is_unedited_config_file(content, template_content) elif path.exists(path.dirname(cfgfn)): doupdate = True identical = False if version is None: version = resolve_name(pkg, "__version__") # Don't install template files for dev versions, or we'll end up # spamming `~/.astropy/config`. if version and "dev" not in version and cfgfn is not None: template_path = path.join( get_config_dir(rootname=rootname), f"{pkg}.{version}.cfg" ) needs_template = not path.exists(template_path) else: needs_template = False if doupdate or needs_template: if needs_template: with open(template_path, "w", encoding="latin-1") as fw: fw.write(template_content) # If we just installed a new template file and we can't # update the main configuration file because it has user # changes, display a warning. if not identical and not doupdate: warn( f"The configuration options in {pkg} {version} may have changed, " "your configuration file was not updated in order to " "preserve local changes. A new configuration template " f"has been saved to '{template_path}'.", ConfigurationChangedWarning, ) if doupdate and not identical: with open(cfgfn, "w", encoding="latin-1") as fw: fw.write(template_content) return True return False def create_config_file(pkg, rootname="astropy", overwrite=False): """ Create the default configuration file for the specified package. If the file already exists, it is updated only if it has not been modified. Otherwise the ``overwrite`` flag is needed to overwrite it. Parameters ---------- pkg : str The package to be updated. rootname : str Name of the root configuration directory. overwrite : bool Force updating the file if it already exists. Returns ------- updated : bool If the profile was updated, `True`, otherwise `False`. 
""" # local import to prevent using the logger before it is configured from astropy.logger import log cfgfn = get_config_filename(pkg, rootname=rootname) # generate the default config template template_content = io.StringIO() generate_config(pkg, template_content) template_content.seek(0) template_content = template_content.read() doupdate = True # if the file already exists, check that it has not been modified if cfgfn is not None and path.exists(cfgfn): with open(cfgfn, encoding="latin-1") as fd: content = fd.read() doupdate = is_unedited_config_file(content, template_content) if doupdate or overwrite: with open(cfgfn, "w", encoding="latin-1") as fw: fw.write(template_content) log.info(f"The configuration file has been successfully written to {cfgfn}") return True elif not doupdate: log.warning( "The configuration file already exists and seems to " "have been customized, so it has not been updated. " "Use overwrite=True if you really want to update it." ) return False
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions to determine where configuration and data/cache files used by Astropy should be placed. """ import os import shutil import sys from functools import wraps __all__ = ["get_config_dir", "get_cache_dir", "set_temp_config", "set_temp_cache"] def _find_home(): """Locates and return the home directory (or best approximation) on this system. Raises ------ OSError If the home directory cannot be located - usually means you are running Astropy on some obscure platform that doesn't have standard home directories. """ try: homedir = os.path.expanduser("~") except Exception: # Linux, Unix, AIX, OS X if os.name == "posix": if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find unix home directory to search for " "astropy config dir" ) elif os.name == "nt": # This is for all modern Windows (NT or after) if "MSYSTEM" in os.environ and os.environ.get("HOME"): # Likely using an msys shell; use whatever it is using for its # $HOME directory homedir = os.environ["HOME"] # See if there's a local home elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ: homedir = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"]) # Maybe a user profile? elif "USERPROFILE" in os.environ: homedir = os.path.join(os.environ["USERPROFILE"]) else: try: import winreg as wreg shell_folders = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders) homedir = wreg.QueryValueEx(key, "Personal")[0] key.Close() except Exception: # As a final possible resort, see if HOME is present if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find windows home directory to " "search for astropy config dir" ) else: # for other platforms, try HOME, although it probably isn't there if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find a home directory to search for " "astropy config dir - are you on an unsupported " "platform?" ) return homedir def get_config_dir(rootname="astropy"): """ Determines the package configuration directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/config``, but if the XDG_CONFIG_HOME environment variable is set and the ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root configuration directory. For example, if ``rootname = 'pkgname'``, the configuration directory would be ``<home>/.pkgname/`` rather than ``<home>/.astropy`` (depending on platform). Returns ------- configdir : str The absolute path to the configuration directory. 
""" # symlink will be set to this if the directory is created linkto = None # If using set_temp_config, that overrides all if set_temp_config._temp_path is not None: xch = set_temp_config._temp_path config_path = os.path.join(xch, rootname) if not os.path.exists(config_path): os.mkdir(config_path) return os.path.abspath(config_path) # first look for XDG_CONFIG_HOME xch = os.environ.get("XDG_CONFIG_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("config", linkto, rootname)) def get_cache_dir(rootname="astropy"): """ Determines the Astropy cache directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/cache``, but if the XDG_CACHE_HOME environment variable is set and the ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root cache directory. For example, if ``rootname = 'pkgname'``, the cache directory will be ``<cache>/.pkgname/``. Returns ------- cachedir : str The absolute path to the cache directory. """ # symlink will be set to this if the directory is created linkto = None # If using set_temp_cache, that overrides all if set_temp_cache._temp_path is not None: xch = set_temp_cache._temp_path cache_path = os.path.join(xch, rootname) if not os.path.exists(cache_path): os.mkdir(cache_path) return os.path.abspath(cache_path) # first look for XDG_CACHE_HOME xch = os.environ.get("XDG_CACHE_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("cache", linkto, rootname)) class _SetTempPath: _temp_path = None _default_path_getter = None def __init__(self, path=None, delete=False): if path is not None: path = os.path.abspath(path) self._path = path self._delete = delete self._prev_path = self.__class__._temp_path def __enter__(self): self.__class__._temp_path = self._path try: return self._default_path_getter("astropy") except Exception: self.__class__._temp_path = self._prev_path raise def __exit__(self, *args): self.__class__._temp_path = self._prev_path if self._delete and self._path is not None: shutil.rmtree(self._path) def __call__(self, func): """Implements use as a decorator.""" @wraps(func) def wrapper(*args, **kwargs): with self: func(*args, **kwargs) return wrapper class set_temp_config(_SetTempPath): """ Context manager to set a temporary path for the Astropy config, primarily for use with testing. If the path set by this context manager does not already exist it will be created, if possible. This may also be used as a decorator on a function to set the config path just within that function. Parameters ---------- path : str, optional The directory (which must exist) in which to find the Astropy config files, or create them if they do not already exist. If None, this restores the config path to the user's default config path as returned by `get_config_dir` as though this context manager were not in effect (this is useful for testing). In this case the ``delete`` argument is always ignored. delete : bool, optional If True, cleans up the temporary directory after exiting the temp context (default: False). 
""" _default_path_getter = staticmethod(get_config_dir) def __enter__(self): # Special case for the config case, where we need to reset all the # cached config objects. We do keep the cache, since some of it # may have been set programmatically rather than be stored in the # config file (e.g., iers.conf.auto_download=False for our tests). from .configuration import _cfgobjs self._cfgobjs_copy = _cfgobjs.copy() _cfgobjs.clear() return super().__enter__() def __exit__(self, *args): from .configuration import _cfgobjs _cfgobjs.clear() _cfgobjs.update(self._cfgobjs_copy) del self._cfgobjs_copy super().__exit__(*args) class set_temp_cache(_SetTempPath): """ Context manager to set a temporary path for the Astropy download cache, primarily for use with testing (though there may be other applications for setting a different cache directory, for example to switch to a cache dedicated to large files). If the path set by this context manager does not already exist it will be created, if possible. This may also be used as a decorator on a function to set the cache path just within that function. Parameters ---------- path : str The directory (which must exist) in which to find the Astropy cache files, or create them if they do not already exist. If None, this restores the cache path to the user's default cache path as returned by `get_cache_dir` as though this context manager were not in effect (this is useful for testing). In this case the ``delete`` argument is always ignored. delete : bool, optional If True, cleans up the temporary directory after exiting the temp context (default: False). """ _default_path_getter = staticmethod(get_cache_dir) def _find_or_create_root_dir(dirnm, linkto, pkgname="astropy"): innerdir = os.path.join(_find_home(), f".{pkgname}") maindir = os.path.join(_find_home(), f".{pkgname}", dirnm) if not os.path.exists(maindir): # first create .astropy dir if needed if not os.path.exists(innerdir): try: os.mkdir(innerdir) except OSError: if not os.path.isdir(innerdir): raise elif not os.path.isdir(innerdir): raise OSError( f"Intended {pkgname} {dirnm} directory {maindir} is actually a file." ) try: os.mkdir(maindir) except OSError: if not os.path.isdir(maindir): raise if ( not sys.platform.startswith("win") and linkto is not None and not os.path.exists(linkto) ): os.symlink(maindir, linkto) elif not os.path.isdir(maindir): raise OSError( f"Intended {pkgname} {dirnm} directory {maindir} is actually a file." ) return os.path.abspath(maindir)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Distribution class and associated machinery. """ import builtins import numpy as np from astropy import stats from astropy import units as u __all__ = ["Distribution"] # we set this by hand because the symbolic expression (below) requires scipy # SMAD_SCALE_FACTOR = 1 / scipy.stats.norm.ppf(0.75) SMAD_SCALE_FACTOR = 1.48260221850560203193936104071326553821563720703125 class Distribution: """ A scalar value or array values with associated uncertainty distribution. This object will take its exact type from whatever the ``samples`` argument is. In general this is expected to be an `~astropy.units.Quantity` or `numpy.ndarray`, although anything compatible with `numpy.asanyarray` is possible. See also: https://docs.astropy.org/en/stable/uncertainty/ Parameters ---------- samples : array-like The distribution, with sampling along the *leading* axis. If 1D, the sole dimension is used as the sampling axis (i.e., it is a scalar distribution). """ _generated_subclasses = {} def __new__(cls, samples): if isinstance(samples, Distribution): samples = samples.distribution else: samples = np.asanyarray(samples, order="C") if samples.shape == (): raise TypeError("Attempted to initialize a Distribution with a scalar") new_dtype = np.dtype( {"names": ["samples"], "formats": [(samples.dtype, (samples.shape[-1],))]} ) samples_cls = type(samples) new_cls = cls._generated_subclasses.get(samples_cls) if new_cls is None: # Make a new class with the combined name, inserting Distribution # itself below the samples class since that way Quantity methods # like ".to" just work (as .view() gets intercepted). However, # repr and str are problems, so we put those on top. # TODO: try to deal with this at the lower level. The problem is # that array2string does not allow one to override how structured # arrays are typeset, leading to all samples to be shown. It may # be possible to hack oneself out by temporarily becoming a void. new_name = samples_cls.__name__ + cls.__name__ new_cls = type( new_name, (_DistributionRepr, samples_cls, ArrayDistribution), {"_samples_cls": samples_cls}, ) cls._generated_subclasses[samples_cls] = new_cls self = samples.view(dtype=new_dtype, type=new_cls) # Get rid of trailing dimension of 1. self.shape = samples.shape[:-1] return self @property def distribution(self): return self["samples"] def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): converted = [] outputs = kwargs.pop("out", None) if outputs: kwargs["out"] = tuple( (output.distribution if isinstance(output, Distribution) else output) for output in outputs ) if method in {"reduce", "accumulate", "reduceat"}: axis = kwargs.get("axis", None) if axis is None: assert isinstance(inputs[0], Distribution) kwargs["axis"] = tuple(range(inputs[0].ndim)) for input_ in inputs: if isinstance(input_, Distribution): converted.append(input_.distribution) else: shape = getattr(input_, "shape", ()) if shape: converted.append(input_[..., np.newaxis]) else: converted.append(input_) results = getattr(ufunc, method)(*converted, **kwargs) if not isinstance(results, tuple): results = (results,) if outputs is None: outputs = (None,) * len(results) finals = [] for result, output in zip(results, outputs): if output is not None: finals.append(output) else: if getattr(result, "shape", False): finals.append(Distribution(result)) else: finals.append(result) return finals if len(finals) > 1 else finals[0] @property def n_samples(self): """ The number of samples of this distribution. 
A single `int`. """ return self.dtype["samples"].shape[0] def pdf_mean(self, dtype=None, out=None): """ The mean of this distribution. Arguments are as for `numpy.mean`. """ return self.distribution.mean(axis=-1, dtype=dtype, out=out) def pdf_std(self, dtype=None, out=None, ddof=0): """ The standard deviation of this distribution. Arguments are as for `numpy.std`. """ return self.distribution.std(axis=-1, dtype=dtype, out=out, ddof=ddof) def pdf_var(self, dtype=None, out=None, ddof=0): """ The variance of this distribution. Arguments are as for `numpy.var`. """ return self.distribution.var(axis=-1, dtype=dtype, out=out, ddof=ddof) def pdf_median(self, out=None): """ The median of this distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ return np.median(self.distribution, axis=-1, out=out) def pdf_mad(self, out=None): """ The median absolute deviation of this distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ median = self.pdf_median(out=out) absdiff = np.abs(self - median) return np.median( absdiff.distribution, axis=-1, out=median, overwrite_input=True ) def pdf_smad(self, out=None): """ The median absolute deviation of this distribution rescaled to match the standard deviation for a normal distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ result = self.pdf_mad(out=out) result *= SMAD_SCALE_FACTOR return result def pdf_percentiles(self, percentile, **kwargs): """ Compute percentiles of this Distribution. Parameters ---------- percentile : float or array of float or `~astropy.units.Quantity` The desired percentiles of the distribution (i.e., on [0,100]). `~astropy.units.Quantity` will be converted to percent, meaning that a ``dimensionless_unscaled`` `~astropy.units.Quantity` will be interpreted as a quantile. Additional keywords are passed into `numpy.percentile`. Returns ------- percentiles : `~astropy.units.Quantity` ['dimensionless'] The ``fracs`` percentiles of this distribution. """ percentile = u.Quantity(percentile, u.percent).value percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs) # numpy.percentile strips units for unclear reasons, so we have to make # a new object with units if hasattr(self.distribution, "_new_view"): return self.distribution._new_view(percs) else: return percs def pdf_histogram(self, **kwargs): """ Compute histogram over the samples in the distribution. Parameters ---------- All keyword arguments are passed into `astropy.stats.histogram`. Note That some of these options may not be valid for some multidimensional distributions. Returns ------- hist : array The values of the histogram. Trailing dimension is the histogram dimension. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Trailing dimension is the bin histogram dimension. 
""" distr = self.distribution raveled_distr = distr.reshape(distr.size // distr.shape[-1], distr.shape[-1]) nhists = [] bin_edges = [] for d in raveled_distr: nhist, bin_edge = stats.histogram(d, **kwargs) nhists.append(nhist) bin_edges.append(bin_edge) nhists = np.array(nhists) nh_shape = self.shape + (nhists.size // self.size,) bin_edges = np.array(bin_edges) be_shape = self.shape + (bin_edges.size // self.size,) return nhists.reshape(nh_shape), bin_edges.reshape(be_shape) class ScalarDistribution(Distribution, np.void): """Scalar distribution. This class mostly exists to make `~numpy.array2print` possible for all subclasses. It is a scalar element, still with n_samples samples. """ pass class ArrayDistribution(Distribution, np.ndarray): # This includes the important override of view and __getitem__ # which are needed for all ndarray subclass Distributions, but not # for the scalar one. _samples_cls = np.ndarray # Override view so that we stay a Distribution version of the new type. def view(self, dtype=None, type=None): """New view of array with the same data. Like `~numpy.ndarray.view` except that the result will always be a new `~astropy.uncertainty.Distribution` instance. If the requested ``type`` is a `~astropy.uncertainty.Distribution`, then no change in ``dtype`` is allowed. """ if type is None and ( isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray) ): type = dtype dtype = None view_args = [item for item in (dtype, type) if item is not None] if type is None or ( isinstance(type, builtins.type) and issubclass(type, Distribution) ): if dtype is not None and dtype != self.dtype: raise ValueError( "cannot view as Distribution subclass with a new dtype." ) return super().view(*view_args) # View as the new non-Distribution class, but turn into a Distribution again. result = self.distribution.view(*view_args) return Distribution(result) # Override __getitem__ so that 'samples' is returned as the sample class. def __getitem__(self, item): result = super().__getitem__(item) if item == "samples": # Here, we need to avoid our own redefinition of view. return super(ArrayDistribution, result).view(self._samples_cls) elif isinstance(result, np.void): return result.view((ScalarDistribution, result.dtype)) else: return result class _DistributionRepr: def __repr__(self): reprarr = repr(self.distribution) if reprarr.endswith(">"): firstspace = reprarr.find(" ") reprarr = reprarr[firstspace + 1 : -1] # :-1] removes the ending '>' return ( f"<{self.__class__.__name__} {reprarr} with n_samples={self.n_samples}>" ) else: # numpy array-like firstparen = reprarr.find("(") reprarr = reprarr[firstparen:] return f"{self.__class__.__name__}{reprarr} with n_samples={self.n_samples}" def __str__(self): distrstr = str(self.distribution) toadd = f" with n_samples={self.n_samples}" return distrstr + toadd def _repr_latex_(self): if hasattr(self.distribution, "_repr_latex_"): superlatex = self.distribution._repr_latex_() toadd = rf", \; n_{{\rm samp}}={self.n_samples}" return superlatex[:-1] + toadd + superlatex[-1] else: return None class NdarrayDistribution(_DistributionRepr, ArrayDistribution): pass # Ensure our base NdarrayDistribution is known. Distribution._generated_subclasses[np.ndarray] = NdarrayDistribution
c52d4796a37b91c4ad5671d5d2ebad6e4e1a3ae72e8cdd33544921b68d46befd
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Built-in distribution-creation functions. """ from warnings import warn import numpy as np from astropy import units as u from .core import Distribution __all__ = ["normal", "poisson", "uniform"] def normal( center, *, std=None, var=None, ivar=None, n_samples, cls=Distribution, **kwargs ): """ Create a Gaussian/normal distribution. Parameters ---------- center : `~astropy.units.Quantity` The center of this distribution std : `~astropy.units.Quantity` or None The standard deviation/σ of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``var`` or ``ivar`` are set). var : `~astropy.units.Quantity` or None The variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set). ivar : `~astropy.units.Quantity` or None The inverse variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``var`` are set). n_samples : int The number of Monte Carlo samples to use with this distribution cls : class The class to use to create this distribution. Typically a `Distribution` subclass. Remaining keywords are passed into the constructor of the ``cls`` Returns ------- distr : `~astropy.uncertainty.Distribution` or object The sampled Gaussian distribution. The type will be the same as the parameter ``cls``. """ center = np.asanyarray(center) if var is not None: if std is None: std = np.asanyarray(var) ** 0.5 else: raise ValueError("normal cannot take both std and var") if ivar is not None: if std is None: std = np.asanyarray(ivar) ** -0.5 else: raise ValueError("normal cannot take both ivar and and std or var") if std is None: raise ValueError("normal requires one of std, var, or ivar") else: std = np.asanyarray(std) randshape = np.broadcast(std, center).shape + (n_samples,) samples = ( center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis] ) return cls(samples, **kwargs) COUNT_UNITS = ( u.count, u.electron, u.dimensionless_unscaled, u.chan, u.bin, u.vox, u.bit, u.byte, ) def poisson(center, n_samples, cls=Distribution, **kwargs): """ Create a Poisson distribution. Parameters ---------- center : `~astropy.units.Quantity` The center value of this distribution (i.e., λ). n_samples : int The number of Monte Carlo samples to use with this distribution cls : class The class to use to create this distribution. Typically a `Distribution` subclass. Remaining keywords are passed into the constructor of the ``cls`` Returns ------- distr : `~astropy.uncertainty.Distribution` or object The sampled Poisson distribution. The type will be the same as the parameter ``cls``. """ # we convert to arrays because np.random.poisson has trouble with quantities has_unit = False if hasattr(center, "unit"): has_unit = True poissonarr = np.asanyarray(center.value) else: poissonarr = np.asanyarray(center) randshape = poissonarr.shape + (n_samples,) samples = np.random.poisson(poissonarr[..., np.newaxis], randshape) if has_unit: if center.unit == u.adu: warn( "ADUs were provided to poisson. ADUs are not strictly count" "units because they need the gain to be applied. It is " "recommended you apply the gain to convert to e.g. electrons." ) elif center.unit not in COUNT_UNITS: warn( f"Unit {center.unit} was provided to poisson, which is not one of" f' {COUNT_UNITS}, and therefore suspect as a "counting" unit. Ensure' " you mean to use Poisson statistics." 
) # re-attach the unit samples = samples * center.unit return cls(samples, **kwargs) def uniform( *, lower=None, upper=None, center=None, width=None, n_samples, cls=Distribution, **kwargs, ): """ Create a Uniform distriution from the lower and upper bounds. Note that this function requires keywords to be explicit, and requires either ``lower``/``upper`` or ``center``/``width``. Parameters ---------- lower : array-like The lower edge of this distribution. If a `~astropy.units.Quantity`, the distribution will have the same units as ``lower``. upper : `~astropy.units.Quantity` The upper edge of this distribution. Must match shape and if a `~astropy.units.Quantity` must have compatible units with ``lower``. center : array-like The center value of the distribution. Cannot be provided at the same time as ``lower``/``upper``. width : array-like The width of the distribution. Must have the same shape and compatible units with ``center`` (if any). n_samples : int The number of Monte Carlo samples to use with this distribution cls : class The class to use to create this distribution. Typically a `Distribution` subclass. Remaining keywords are passed into the constructor of the ``cls`` Returns ------- distr : `~astropy.uncertainty.Distribution` or object The sampled uniform distribution. The type will be the same as the parameter ``cls``. """ if center is None and width is None: lower = np.asanyarray(lower) upper = np.asanyarray(upper) if lower.shape != upper.shape: raise ValueError("lower and upper must have consistent shapes") elif upper is None and lower is None: center = np.asanyarray(center) width = np.asanyarray(width) lower = center - width / 2 upper = center + width / 2 else: raise ValueError( "either upper/lower or center/width must be given " "to uniform - other combinations are not valid" ) newshape = lower.shape + (n_samples,) if lower.shape == tuple() and upper.shape == tuple(): width = upper - lower # scalar else: width = (upper - lower)[:, np.newaxis] lower = lower[:, np.newaxis] samples = lower + width * np.random.uniform(size=newshape) return cls(samples, **kwargs)
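# --- Illustrative usage sketch (not part of the upstream module) -----------
# A short, hedged example of the creation helpers above: ``normal``,
# ``uniform`` and ``poisson`` draw Monte Carlo samples and wrap them in a
# Distribution. The centers, widths, units and sample counts below are
# arbitrary assumptions for demonstration only.
if __name__ == "__main__":
    # Gaussian distribution with units attached (here: a distance in kpc).
    distance = normal(4.0 * u.kpc, std=0.5 * u.kpc, n_samples=10000)
    print(distance.pdf_mean(), distance.pdf_std())   # roughly 4 kpc and 0.5 kpc

    # Dimensionless uniform distribution specified by its lower/upper bounds.
    scale = uniform(lower=0.9, upper=1.1, n_samples=10000)
    print(scale.pdf_percentiles([5, 50, 95]))        # roughly [0.91, 1.0, 1.09]

    # Poisson distribution for a counting measurement with mean rate 100.
    counts = poisson(100 * u.count, n_samples=10000)
    print(counts.pdf_std())                          # roughly sqrt(100) = 10 counts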
c3f5035dadb7b7044ff24338d4683e4baa7948636265b9f1ac3c672941aa0935
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements classes (called Fitters) which combine optimization algorithms (typically from `scipy.optimize`) with statistic functions to perform fitting. Fitters are implemented as callable classes. In addition to the data to fit, the ``__call__`` method takes an instance of `~astropy.modeling.core.FittableModel` as input, and returns a copy of the model with its parameters determined by the optimizer. Optimization algorithms, called "optimizers" are implemented in `~astropy.modeling.optimizers` and statistic functions are in `~astropy.modeling.statistic`. The goal is to provide an easy to extend framework and allow users to easily create new fitters by combining statistics with optimizers. There are two exceptions to the above scheme. `~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq` function. `~astropy.modeling.fitting.LevMarLSQFitter` uses `~scipy.optimize.leastsq` which combines optimization and statistic in one implementation. """ # pylint: disable=invalid-name import abc import inspect import operator import warnings from functools import reduce, wraps from importlib.metadata import entry_points import numpy as np from astropy.units import Quantity from astropy.utils.decorators import deprecated from astropy.utils.exceptions import AstropyUserWarning from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex from .spline import ( SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter, ) from .statistic import leastsquare from .utils import _combine_equivalency_dict, poly_map_domain __all__ = [ "LinearLSQFitter", "LevMarLSQFitter", "TRFLSQFitter", "DogBoxLSQFitter", "LMLSQFitter", "FittingWithOutlierRemoval", "SLSQPLSQFitter", "SimplexLSQFitter", "JointFitter", "Fitter", "ModelLinearityError", "ModelsError", "SplineExactKnotsFitter", "SplineInterpolateFitter", "SplineSmoothingFitter", "SplineSplrepFitter", ] # Statistic functions implemented in `astropy.modeling.statistic.py STATISTICS = [leastsquare] # Optimizers implemented in `astropy.modeling.optimizers.py OPTIMIZERS = [Simplex, SLSQP] class NonFiniteValueError(RuntimeError): """ Error raised when attempting to a non-finite value """ class Covariance: """Class for covariance matrix calculated by fitter.""" def __init__(self, cov_matrix, param_names): self.cov_matrix = cov_matrix self.param_names = param_names def pprint(self, max_lines, round_val): # Print and label lower triangle of covariance matrix # Print rows for params up to `max_lines`, round floats to 'round_val' longest_name = max(len(x) for x in self.param_names) ret_str = "parameter variances / covariances \n" fstring = f'{"": <{longest_name}}| {{0}}\n' for i, row in enumerate(self.cov_matrix): if i <= max_lines - 1: param = self.param_names[i] ret_str += fstring.replace(" " * len(param), param, 1).format( repr(np.round(row[: i + 1], round_val))[7:-2] ) else: ret_str += "..." 
return ret_str.rstrip() def __repr__(self): return self.pprint(max_lines=10, round_val=3) def __getitem__(self, params): # index covariance matrix by parameter names or indices if len(params) != 2: raise ValueError("Covariance must be indexed by two values.") if all(isinstance(item, str) for item in params): i1, i2 = self.param_names.index(params[0]), self.param_names.index( params[1] ) elif all(isinstance(item, int) for item in params): i1, i2 = params else: raise TypeError( "Covariance can be indexed by two parameter names or integer indices." ) return self.cov_matrix[i1][i2] class StandardDeviations: """Class for fitting uncertainties.""" def __init__(self, cov_matrix, param_names): self.param_names = param_names self.stds = self._calc_stds(cov_matrix) def _calc_stds(self, cov_matrix): # sometimes scipy lstsq returns a non-sensical negative vals in the # diagonals of the cov_x it computes. stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)] return stds def pprint(self, max_lines, round_val): longest_name = max(len(x) for x in self.param_names) ret_str = "standard deviations\n" for i, std in enumerate(self.stds): if i <= max_lines - 1: param = self.param_names[i] ret_str += ( f"{param}{' ' * (longest_name - len(param))}| " f"{np.round(std, round_val)}\n" ) else: ret_str += "..." return ret_str.rstrip() def __repr__(self): return self.pprint(max_lines=10, round_val=3) def __getitem__(self, param): if isinstance(param, str): i = self.param_names.index(param) elif isinstance(param, int): i = param else: raise TypeError( "Standard deviation can be indexed by parameter name or integer." ) return self.stds[i] class ModelsError(Exception): """Base class for model exceptions""" class ModelLinearityError(ModelsError): """Raised when a non-linear model is passed to a linear fitter.""" class UnsupportedConstraintError(ModelsError, ValueError): """ Raised when a fitter does not support a type of constraint. """ class _FitterMeta(abc.ABCMeta): """ Currently just provides a registry for all Fitter classes. """ registry = set() def __new__(mcls, name, bases, members): cls = super().__new__(mcls, name, bases, members) if not inspect.isabstract(cls) and not name.startswith("_"): mcls.registry.add(cls) return cls def fitter_unit_support(func): """ This is a decorator that can be used to add support for dealing with quantities to any __call__ method on a fitter which may not support quantities itself. This is done by temporarily removing units from all parameters then adding them back once the fitting has completed. """ @wraps(func) def wrapper(self, model, x, y, z=None, **kwargs): equivalencies = kwargs.pop("equivalencies", None) data_has_units = ( isinstance(x, Quantity) or isinstance(y, Quantity) or isinstance(z, Quantity) ) model_has_units = model._has_units if data_has_units or model_has_units: if model._supports_unit_fitting: # We now combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict( model.inputs, equivalencies, model.input_units_equivalencies ) # If input_units is defined, we transform the input data into those # expected by the model. 
We hard-code the input names 'x', and 'y' # here since FittableModel instances have input names ('x',) or # ('x', 'y') if model.input_units is not None: if isinstance(x, Quantity): x = x.to( model.input_units[model.inputs[0]], equivalencies=input_units_equivalencies[model.inputs[0]], ) if isinstance(y, Quantity) and z is not None: y = y.to( model.input_units[model.inputs[1]], equivalencies=input_units_equivalencies[model.inputs[1]], ) # Create a dictionary mapping the real model inputs and outputs # names to the data. This remapping of names must be done here, after # the input data is converted to the correct units. rename_data = {model.inputs[0]: x} if z is not None: rename_data[model.outputs[0]] = z rename_data[model.inputs[1]] = y else: rename_data[model.outputs[0]] = y rename_data["z"] = None # We now strip away the units from the parameters, taking care to # first convert any parameters to the units that correspond to the # input units (to make sure that initial guesses on the parameters) # are in the right unit system model = model.without_units_for_data(**rename_data) if isinstance(model, tuple): rename_data["_left_kwargs"] = model[1] rename_data["_right_kwargs"] = model[2] model = model[0] # We strip away the units from the input itself add_back_units = False if isinstance(x, Quantity): add_back_units = True xdata = x.value else: xdata = np.asarray(x) if isinstance(y, Quantity): add_back_units = True ydata = y.value else: ydata = np.asarray(y) if z is not None: if isinstance(z, Quantity): add_back_units = True zdata = z.value else: zdata = np.asarray(z) # We run the fitting if z is None: model_new = func(self, model, xdata, ydata, **kwargs) else: model_new = func(self, model, xdata, ydata, zdata, **kwargs) # And finally we add back units to the parameters if add_back_units: model_new = model_new.with_units_from_data(**rename_data) return model_new else: raise NotImplementedError( "This model does not support being fit to data with units." ) else: return func(self, model, x, y, z=z, **kwargs) return wrapper class Fitter(metaclass=_FitterMeta): """ Base class for all fitters. Parameters ---------- optimizer : callable A callable implementing an optimization algorithm statistic : callable Statistic function """ supported_constraints = [] def __init__(self, optimizer, statistic): if optimizer is None: raise ValueError("Expected an optimizer.") if statistic is None: raise ValueError("Expected a statistic function.") if inspect.isclass(optimizer): # a callable class self._opt_method = optimizer() elif inspect.isfunction(optimizer): self._opt_method = optimizer else: raise ValueError("Expected optimizer to be a callable class or a function.") if inspect.isclass(statistic): self._stat_method = statistic() else: self._stat_method = statistic def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [other_args], [input coordinates]] other_args may include weights or any other quantities specific for a statistic Notes ----- The list of arguments (args) is set in the `__call__` method. Fitters may overwrite this method, e.g. when statistic functions require other arguments. """ model = args[0] meas = args[-1] fitter_to_model_params(model, fps) res = self._stat_method(meas, model, *args[1:-1]) return res @staticmethod def _add_fitting_uncertainties(*args): """ When available, calculate and sets the parameter covariance matrix (model.cov_matrix) and standard deviations (model.stds). 
""" return None @abc.abstractmethod def __call__(self): """ This method performs the actual fitting and modifies the parameter list of a model. Fitter subclasses should implement this method. """ raise NotImplementedError("Subclasses should implement this method.") # TODO: I have ongoing branch elsewhere that's refactoring this module so that # all the fitter classes in here are Fitter subclasses. In the meantime we # need to specify that _FitterMeta is its metaclass. class LinearLSQFitter(metaclass=_FitterMeta): """ A class performing a linear least square fitting. Uses `numpy.linalg.lstsq` to do the fitting. Given a model and data, fits the model to the data and changes the model's parameters. Keeps a dictionary of auxiliary fitting information. Notes ----- Note that currently LinearLSQFitter does not support compound models. """ supported_constraints = ["fixed"] supports_masked_input = True def __init__(self, calc_uncertainties=False): self.fit_info = { "residuals": None, "rank": None, "singular_values": None, "params": None, } self._calc_uncertainties = calc_uncertainties @staticmethod def _is_invertible(m): """Check if inverse of matrix can be obtained.""" if m.shape[0] != m.shape[1]: return False if np.linalg.matrix_rank(m) < m.shape[0]: return False return True def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None): """ Calculate and parameter covariance matrix and standard deviations and set `cov_matrix` and `stds` attributes. """ x_dot_x_prime = np.dot(a.T, a) masked = False or hasattr(y, "mask") # check if invertible. if not, can't calc covariance. if not self._is_invertible(x_dot_x_prime): return model inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime) if z is None: # 1D models if len(model) == 1: # single model mask = None if masked: mask = y.mask xx = np.ma.array(x, mask=mask) RSS = [(1 / (xx.count() - n_coeff)) * resids] if len(model) > 1: # model sets RSS = [] # collect sum residuals squared for each model in set for j in range(len(model)): mask = None if masked: mask = y.mask[..., j].flatten() xx = np.ma.array(x, mask=mask) eval_y = model(xx, model_set_axis=False) eval_y = np.rollaxis(eval_y, model.model_set_axis)[j] RSS.append( (1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2) ) else: # 2D model if len(model) == 1: mask = None if masked: warnings.warn( "Calculation of fitting uncertainties " "for 2D models with masked values not " "currently supported.\n", AstropyUserWarning, ) return xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask) # len(xx) instead of xx.count. this will break if values are masked? RSS = [(1 / (len(xx) - n_coeff)) * resids] else: RSS = [] for j in range(len(model)): eval_z = model(x, y, model_set_axis=False) mask = None # need to figure out how to deal w/ masking here. if model.model_set_axis == 1: # model_set_axis passed when evaluating only refers to input shapes # so output must be reshaped for model_set_axis=1. 
eval_z = np.rollaxis(eval_z, 1) eval_z = eval_z[j] RSS.append( [(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)] ) covs = [inv_x_dot_x_prime * r for r in RSS] free_param_names = [ x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False) ] if len(covs) == 1: model.cov_matrix = Covariance(covs[0], model.param_names) model.stds = StandardDeviations(covs[0], free_param_names) else: model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs] model.stds = [StandardDeviations(cov, free_param_names) for cov in covs] @staticmethod def _deriv_with_constraints(model, param_indices, x=None, y=None): if y is None: d = np.array(model.fit_deriv(x, *model.parameters)) else: d = np.array(model.fit_deriv(x, y, *model.parameters)) if model.col_fit_deriv: return d[param_indices] else: return d[..., param_indices] def _map_domain_window(self, model, x, y=None): """ Maps domain into window for a polynomial model which has these attributes. """ if y is None: if hasattr(model, "domain") and model.domain is None: model.domain = [x.min(), x.max()] if hasattr(model, "window") and model.window is None: model.window = [-1, 1] return poly_map_domain(x, model.domain, model.window) else: if hasattr(model, "x_domain") and model.x_domain is None: model.x_domain = [x.min(), x.max()] if hasattr(model, "y_domain") and model.y_domain is None: model.y_domain = [y.min(), y.max()] if hasattr(model, "x_window") and model.x_window is None: model.x_window = [-1.0, 1.0] if hasattr(model, "y_window") and model.y_window is None: model.y_window = [-1.0, 1.0] xnew = poly_map_domain(x, model.x_domain, model.x_window) ynew = poly_map_domain(y, model.y_domain, model.y_window) return xnew, ynew @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, rcond=None): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array Input coordinates y : array-like Input coordinates z : array-like, optional Input coordinates. If the dependent (``y`` or ``z``) coordinate values are provided as a `numpy.ma.MaskedArray`, any masked points are ignored when fitting. Note that model set fitting is significantly slower when there are masked points (not just an empty mask), as the matrix equation has to be solved for each model separately when their coordinate grids differ. weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. rcond : float, optional Cut-off ratio for small singular values of ``a``. Singular values are set to zero if they are smaller than ``rcond`` times the largest singular value of ``a``. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ if not model.fittable: raise ValueError("Model must be a subclass of FittableModel") if not model.linear: raise ModelLinearityError( "Model is not linear in parameters, " "linear fit methods should not be used." 
) if hasattr(model, "submodel_names"): raise ValueError("Model must be simple, not compound") _validate_constraints(self.supported_constraints, model) model_copy = model.copy() model_copy.sync_constraints = False _, fitparam_indices, _ = model_to_fit_params(model_copy) if model_copy.n_inputs == 2 and z is None: raise ValueError("Expected x, y and z for a 2 dimensional model.") farg = _convert_input( x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis ) n_fixed = sum(model_copy.fixed.values()) # This is also done by _convert_inputs, but we need it here to allow # checking the array dimensionality before that gets called: if weights is not None: weights = np.asarray(weights, dtype=float) if n_fixed: # The list of fixed params is the complement of those being fitted: fixparam_indices = [ idx for idx in range(len(model_copy.param_names)) if idx not in fitparam_indices ] # Construct matrix of user-fixed parameters that can be dotted with # the corresponding fit_deriv() terms, to evaluate corrections to # the dependent variable in order to fit only the remaining terms: fixparams = np.asarray( [ getattr(model_copy, model_copy.param_names[idx]).value for idx in fixparam_indices ] ) if len(farg) == 2: x, y = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, weights = _convert_input( x, weights, n_models=len(model_copy) if weights.ndim == y.ndim else 1, model_set_axis=model_copy.model_set_axis, ) # map domain into window if hasattr(model_copy, "domain"): x = self._map_domain_window(model_copy, x) if n_fixed: lhs = np.asarray( self._deriv_with_constraints(model_copy, fitparam_indices, x=x) ) fixderivs = self._deriv_with_constraints( model_copy, fixparam_indices, x=x ) else: lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x) rhs = y else: x, y, z = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, _, weights = _convert_input( x, y, weights, n_models=len(model_copy) if weights.ndim == z.ndim else 1, model_set_axis=model_copy.model_set_axis, ) # map domain into window if hasattr(model_copy, "x_domain"): x, y = self._map_domain_window(model_copy, x, y) if n_fixed: lhs = np.asarray( self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y) ) fixderivs = self._deriv_with_constraints( model_copy, fixparam_indices, x=x, y=y ) else: lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y) if len(model_copy) > 1: # Just to be explicit (rather than baking in False == 0): model_axis = model_copy.model_set_axis or 0 if z.ndim > 2: # For higher-dimensional z, flatten all the axes except the # dimension along which models are stacked and transpose so # the model axis is *last* (I think this resolves Erik's # pending generalization from 80a6f25a): rhs = np.rollaxis(z, model_axis, z.ndim) rhs = rhs.reshape(-1, rhs.shape[-1]) else: # This "else" seems to handle the corner case where the # user has already flattened x/y before attempting a 2D fit # but z has a second axis for the model set. NB. This is # ~5-10x faster than using rollaxis. 
rhs = z.T if model_axis == 0 else z if weights is not None: # Same for weights if weights.ndim > 2: # Separate 2D weights for each model: weights = np.rollaxis(weights, model_axis, weights.ndim) weights = weights.reshape(-1, weights.shape[-1]) elif weights.ndim == z.ndim: # Separate, flattened weights for each model: weights = weights.T if model_axis == 0 else weights else: # Common weights for all the models: weights = weights.flatten() else: rhs = z.flatten() if weights is not None: weights = weights.flatten() # If the derivative is defined along rows (as with non-linear models) if model_copy.col_fit_deriv: lhs = np.asarray(lhs).T # Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs # when constructing their Vandermonde matrix, which can lead to obscure # failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices, # so just raise a slightly more informative error when this happens: if np.asanyarray(lhs).ndim > 2: raise ValueError( f"{type(model_copy).__name__} gives unsupported >2D " "derivative matrix for this x/y" ) # Subtract any terms fixed by the user from (a copy of) the RHS, in # order to fit the remaining terms correctly: if n_fixed: if model_copy.col_fit_deriv: fixderivs = np.asarray(fixderivs).T # as for lhs above rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms # Subtract any terms implicit in the model from the RHS, which, like # user-fixed terms, affect the dependent variable but are not fitted: if sum_of_implicit_terms is not None: # If we have a model set, the extra axis must be added to # sum_of_implicit_terms as its innermost dimension, to match the # dimensionality of rhs after _convert_input "rolls" it as needed # by np.linalg.lstsq. The vector then gets broadcast to the right # number of sets (columns). This assumes all the models share the # same input coordinates, as is currently the case. if len(model_copy) > 1: sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis] rhs = rhs - sum_of_implicit_terms if weights is not None: if rhs.ndim == 2: if weights.shape == rhs.shape: # separate weights for multiple models case: broadcast # lhs to have more dimension (for each model) lhs = lhs[..., np.newaxis] * weights[:, np.newaxis] rhs = rhs * weights else: lhs *= weights[:, np.newaxis] # Don't modify in-place in case rhs was the original # dependent variable array rhs = rhs * weights[:, np.newaxis] else: lhs *= weights[:, np.newaxis] rhs = rhs * weights scl = (lhs * lhs).sum(0) lhs /= scl masked = np.any(np.ma.getmask(rhs)) if weights is not None and not masked and np.any(np.isnan(lhs)): raise ValueError( "Found NaNs in the coefficient matrix, which " "should not happen and would crash the lapack " "routine. Maybe check that weights are not null." ) a = None # need for calculating covarience if (masked and len(model_copy) > 1) or ( weights is not None and weights.ndim > 1 ): # Separate masks or weights for multiple models case: Numpy's # lstsq supports multiple dimensions only for rhs, so we need to # loop manually on the models. This may be fixed in the future # with https://github.com/numpy/numpy/pull/15777. # Initialize empty array of coefficients and populate it one model # at a time. 
The shape matches the number of coefficients from the # Vandermonde matrix and the number of models from the RHS: lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype) # Arrange the lhs as a stack of 2D matrices that we can iterate # over to get the correctly-orientated lhs for each model: if lhs.ndim > 2: lhs_stack = np.rollaxis(lhs, -1, 0) else: lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape) # Loop over the models and solve for each one. By this point, the # model set axis is the second of two. Transpose rather than using, # say, np.moveaxis(array, -1, 0), since it's slightly faster and # lstsq can't handle >2D arrays anyway. This could perhaps be # optimized by collecting together models with identical masks # (eg. those with no rejected points) into one operation, though it # will still be relatively slow when calling lstsq repeatedly. for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T): # Cull masked points on both sides of the matrix equation: good = ~model_rhs.mask if masked else slice(None) model_lhs = model_lhs[good] model_rhs = model_rhs[good][..., np.newaxis] a = model_lhs # Solve for this model: t_coef, resids, rank, sval = np.linalg.lstsq( model_lhs, model_rhs, rcond ) model_lacoef[:] = t_coef.T else: # If we're fitting one or more models over a common set of points, # we only have to solve a single matrix equation, which is an order # of magnitude faster than calling lstsq() once per model below: good = ~rhs.mask if masked else slice(None) # latter is a no-op a = lhs[good] # Solve for one or more models: lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond) self.fit_info["residuals"] = resids self.fit_info["rank"] = rank self.fit_info["singular_values"] = sval lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl self.fit_info["params"] = lacoef fitter_to_model_params(model_copy, lacoef.flatten()) # TODO: Only Polynomial models currently have an _order attribute; # maybe change this to read isinstance(model, PolynomialBase) if ( hasattr(model_copy, "_order") and len(model_copy) == 1 and rank < (model_copy._order - n_fixed) ): warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning) # calculate and set covariance matrix and standard devs. on model if self._calc_uncertainties: if len(y) > len(lacoef): self._add_fitting_uncertainties( model_copy, a * scl, len(lacoef), x, y, z, resids ) model_copy.sync_constraints = True return model_copy class FittingWithOutlierRemoval: """ This class combines an outlier removal technique with a fitting procedure. Basically, given a maximum number of iterations ``niter``, outliers are removed and fitting is performed for each iteration, until no new outliers are found or ``niter`` is reached. Parameters ---------- fitter : `Fitter` An instance of any Astropy fitter, i.e., LinearLSQFitter, LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For model set fitting, this must understand masked input data (as indicated by the fitter class attribute ``supports_masked_input``). outlier_func : callable A function for outlier removal. If this accepts an ``axis`` parameter like the `numpy` functions, the appropriate value will be supplied automatically when fitting model sets (unless overridden in ``outlier_kwargs``), to find outliers for each model separately; otherwise, the same filtering must be performed in a loop over models, which is almost an order of magnitude slower. niter : int, optional Maximum number of iterations. 
outlier_kwargs : dict, optional Keyword arguments for outlier_func. Attributes ---------- fit_info : dict The ``fit_info`` (if any) from the last iteration of the wrapped ``fitter`` during the most recent fit. An entry is also added with the keyword ``niter`` that records the actual number of fitting iterations performed (as opposed to the user-specified maximum). """ def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs): self.fitter = fitter self.outlier_func = outlier_func self.niter = niter self.outlier_kwargs = outlier_kwargs self.fit_info = {"niter": None} def __str__(self): return ( f"Fitter: {self.fitter.__class__.__name__}\n" f"Outlier function: {self.outlier_func.__name__}\n" f"Num. of iterations: {self.niter}\n" f"Outlier func. args.: {self.outlier_kwargs}" ) def __repr__(self): return ( f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, " f"outlier_func: {self.outlier_func.__name__}," f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})" ) def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Parameters ---------- model : `~astropy.modeling.FittableModel` An analytic model which will be fit to the provided data. This also contains the initial guess for an optimization algorithm. x : array-like Input coordinates. y : array-like Data measurements (1D case) or input coordinates (2D case). z : array-like, optional Data measurements (2D case). weights : array-like, optional Weights to be passed to the fitter. kwargs : dict, optional Keyword arguments to be passed to the fitter. Returns ------- fitted_model : `~astropy.modeling.FittableModel` Fitted model after outlier removal. mask : `numpy.ndarray` Boolean mask array, identifying which points were used in the final fitting iteration (False) and which were found to be outliers or were masked in the input (True). """ # For single models, the data get filtered here at each iteration and # then passed to the fitter, which is the historical behavior and # works even for fitters that don't understand masked arrays. For model # sets, the fitter must be able to filter masked data internally, # because fitters require a single set of x/y coordinates whereas the # eliminated points can vary between models. To avoid this limitation, # we could fall back to looping over individual model fits, but it # would likely be fiddly and involve even more overhead (and the # non-linear fitters don't work with model sets anyway, as of writing). 
if len(model) == 1: model_set_axis = None else: if ( not hasattr(self.fitter, "supports_masked_input") or self.fitter.supports_masked_input is not True ): raise ValueError( f"{type(self.fitter).__name__} cannot fit model sets with masked " "values" ) # Fitters use their input model's model_set_axis to determine how # their input data are stacked: model_set_axis = model.model_set_axis # Construct input coordinate tuples for fitters & models that are # appropriate for the dimensionality being fitted: if z is None: coords = (x,) data = y else: coords = x, y data = z # For model sets, construct a numpy-standard "axis" tuple for the # outlier function, to treat each model separately (if supported): if model_set_axis is not None: if model_set_axis < 0: model_set_axis += data.ndim if "axis" not in self.outlier_kwargs: # allow user override # This also works for False (like model instantiation): self.outlier_kwargs["axis"] = tuple( n for n in range(data.ndim) if n != model_set_axis ) loop = False # Starting fit, prior to any iteration and masking: fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs) filtered_data = np.ma.masked_array(data) if filtered_data.mask is np.ma.nomask: filtered_data.mask = False filtered_weights = weights last_n_masked = filtered_data.mask.sum() n = 0 # (allow recording no. of iterations when 0) # Perform the iterative fitting: for n in range(1, self.niter + 1): # (Re-)evaluate the last model: model_vals = fitted_model(*coords, model_set_axis=False) # Determine the outliers: if not loop: # Pass axis parameter if outlier_func accepts it, otherwise # prepare for looping over models: try: filtered_data = self.outlier_func( filtered_data - model_vals, **self.outlier_kwargs ) # If this happens to catch an error with a parameter other # than axis, the next attempt will fail accordingly: except TypeError: if model_set_axis is None: raise else: self.outlier_kwargs.pop("axis", None) loop = True # Construct MaskedArray to hold filtered values: filtered_data = np.ma.masked_array( filtered_data, dtype=np.result_type(filtered_data, model_vals), copy=True, ) # Make sure the mask is an array, not just nomask: if filtered_data.mask is np.ma.nomask: filtered_data.mask = False # Get views transposed appropriately for iteration # over the set (handling data & mask separately due to # NumPy issue #8506): data_T = np.rollaxis(filtered_data, model_set_axis, 0) mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0) if loop: model_vals_T = np.rollaxis(model_vals, model_set_axis, 0) for row_data, row_mask, row_mod_vals in zip( data_T, mask_T, model_vals_T ): masked_residuals = self.outlier_func( row_data - row_mod_vals, **self.outlier_kwargs ) row_data.data[:] = masked_residuals.data row_mask[:] = masked_residuals.mask # Issue speed warning after the fact, so it only shows up when # the TypeError is genuinely due to the axis argument. 
warnings.warn( "outlier_func did not accept axis argument; " "reverted to slow loop over models.", AstropyUserWarning, ) # Recombine newly-masked residuals with model to get masked values: filtered_data += model_vals # Re-fit the data after filtering, passing masked/unmasked values # for single models / sets, respectively: if model_set_axis is None: good = ~filtered_data.mask if weights is not None: filtered_weights = weights[good] fitted_model = self.fitter( fitted_model, *(c[good] for c in coords), filtered_data.data[good], weights=filtered_weights, **kwargs, ) else: fitted_model = self.fitter( fitted_model, *coords, filtered_data, weights=filtered_weights, **kwargs, ) # Stop iteration if the masked points are no longer changing (with # cumulative rejection we only need to compare how many there are): this_n_masked = filtered_data.mask.sum() # (minimal overhead) if this_n_masked == last_n_masked: break last_n_masked = this_n_masked self.fit_info = {"niter": n} self.fit_info.update(getattr(self.fitter, "fit_info", {})) return fitted_model, filtered_data.mask class _NonLinearLSQFitter(metaclass=_FitterMeta): """ Base class for Non-Linear least-squares fitters Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds : bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. Default: True """ supported_constraints = ["fixed", "tied", "bounds"] """ The constraint types supported by this fitter type. """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=True): self.fit_info = None self._calc_uncertainties = calc_uncertainties self._use_min_max_bounds = use_min_max_bounds super().__init__() def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [weights], [input coordinates]] """ model = args[0] weights = args[1] fitter_to_model_params(model, fps, self._use_min_max_bounds) meas = args[-1] if weights is None: value = np.ravel(model(*args[2:-1]) - meas) else: value = np.ravel(weights * (model(*args[2:-1]) - meas)) if not np.all(np.isfinite(value)): raise NonFiniteValueError( "Objective function has encountered a non-finite value, " "this will cause the fit to fail!\n" "Please remove non-finite values from your input data before " "fitting to avoid this error." ) return value @staticmethod def _add_fitting_uncertainties(model, cov_matrix): """ Set ``cov_matrix`` and ``stds`` attributes on model with parameter covariance matrix returned by ``optimize.leastsq``. """ free_param_names = [ x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False) ] model.cov_matrix = Covariance(cov_matrix, free_param_names) model.stds = StandardDeviations(cov_matrix, free_param_names) @staticmethod def _wrap_deriv(params, model, weights, x, y, z=None): """ Wraps the method calculating the Jacobian of the function to account for model constraints. `scipy.optimize.leastsq` expects the function derivative to have the above signature (parlist, (argtuple)). In order to accommodate model constraints, instead of using p directly, we set the parameter list in this function. 
""" if weights is None: weights = 1.0 if any(model.fixed.values()) or any(model.tied.values()): # update the parameters with the current values from the fitter fitter_to_model_params(model, params) if z is None: full = np.array(model.fit_deriv(x, *model.parameters)) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full else: full = np.array( [np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)] ) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full pars = [getattr(model, name) for name in model.param_names] fixed = [par.fixed for par in pars] tied = [par.tied for par in pars] tied = list(np.where([par.tied is not False for par in pars], True, tied)) fix_and_tie = np.logical_or(fixed, tied) ind = np.logical_not(fix_and_tie) if not model.col_fit_deriv: residues = np.asarray(full_deriv[np.nonzero(ind)]).T else: residues = full_deriv[np.nonzero(ind)] return [np.ravel(_) for _ in residues] else: if z is None: fit_deriv = np.array(model.fit_deriv(x, *params)) try: output = np.array( [np.ravel(_) for _ in np.array(weights) * fit_deriv] ) if output.shape != fit_deriv.shape: output = np.array( [np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv] ) return output except ValueError: return np.array( [ np.ravel(_) for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0) ] ).transpose() else: if not model.col_fit_deriv: return [ np.ravel(_) for _ in ( np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T ).T ] return [ np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params)) ] def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg): # now try to compute the true covariance matrix if (len(y) > len(init_values)) and cov_x is not None: sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2) dof = len(y) - len(init_values) self.fit_info["param_cov"] = cov_x * sum_sqrs / dof else: self.fit_info["param_cov"] = None if self._calc_uncertainties is True: if self.fit_info["param_cov"] is not None: self._add_fitting_uncertainties(model, self.fit_info["param_cov"]) def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): return None, None, None def _filter_non_finite(self, x, y, z=None): """ Filter out non-finite values in x, y, z. Returns ------- x, y, z : ndarrays x, y, and z with non-finite values filtered out. """ MESSAGE = "Non-Finite input data has been removed by the fitter." if z is None: mask = np.isfinite(y) if not np.all(mask): warnings.warn(MESSAGE, AstropyUserWarning) return x[mask], y[mask], None else: mask = np.isfinite(z) if not np.all(mask): warnings.warn(MESSAGE, AstropyUserWarning) return x[mask], y[mask], z[mask] @fitter_unit_support def __call__( self, model, x, y, z=None, weights=None, maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC, epsilon=DEFAULT_EPS, estimate_jacobian=False, filter_non_finite=False, ): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. maxiter : int maximum number of iterations acc : float Relative error desired in the approximate solution epsilon : float A suitable step length for the forward-difference approximation of the Jacobian (if model.fjac=None). 
If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. estimate_jacobian : bool If False (default) and if the model has a fit_deriv method, it will be used. Otherwise the Jacobian will be estimated. If True, the Jacobian will be estimated in any case. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. filter_non_finite : bool, optional Whether or not to filter data with non-finite values. Default is False Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self.supported_constraints) model_copy.sync_constraints = False if filter_non_finite: x, y, z = self._filter_non_finite(x, y, z) farg = ( model_copy, weights, ) + _convert_input(x, y, z) init_values, fitparams, cov_x = self._run_fitter( model_copy, farg, maxiter, acc, epsilon, estimate_jacobian ) self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg) model.sync_constraints = True return model_copy class LevMarLSQFitter(_NonLinearLSQFitter): """ Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : dict The `scipy.optimize.leastsq` result for the most recent fit (see notes). Notes ----- The ``fit_info`` dictionary contains the values returned by `scipy.optimize.leastsq` for the most recent fit, including the values from the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq` documentation for details on the meaning of these values. Note that the ``x`` return value is *not* included (as it is instead the parameter values of the returned model). Additionally, one additional element of ``fit_info`` is computed whenever a model is fit, with the key 'param_cov'. The corresponding value is the covariance matrix of the parameters as a 2D numpy array. The order of the matrix elements matches the order of the parameters in the fitted model (i.e., the same order as ``model.param_names``). """ def __init__(self, calc_uncertainties=False): super().__init__(calc_uncertainties) self.fit_info = { "nfev": None, "fvec": None, "fjac": None, "ipvt": None, "qtf": None, "message": None, "ierr": None, "param_jac": None, "param_cov": None, } def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize if model.fit_deriv is None or estimate_jacobian: dfunc = None else: dfunc = self._wrap_deriv init_values, _, _ = model_to_fit_params(model) fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq( self.objective_function, init_values, args=farg, Dfun=dfunc, col_deriv=model.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon, xtol=acc, full_output=True, ) fitter_to_model_params(model, fitparams) self.fit_info.update(dinfo) self.fit_info["cov_x"] = cov_x self.fit_info["message"] = mess self.fit_info["ierr"] = ierr if ierr not in [1, 2, 3, 4]: warnings.warn( "The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning, ) return init_values, fitparams, cov_x class _NLLSQFitter(_NonLinearLSQFitter): """ Wrapper class for `scipy.optimize.least_squares` method, which provides: - Trust Region Reflective - dogbox - Levenberg-Marqueardt algorithms using the least squares statistic. 
Parameters ---------- method : str ‘trf’ : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. ‘dogbox’ : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. ‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn’t handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False): super().__init__(calc_uncertainties, use_min_max_bounds) self._method = method def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize from scipy.linalg import svd if model.fit_deriv is None or estimate_jacobian: dfunc = "2-point" else: def _dfunc(params, model, weights, x, y, z=None): if model.col_fit_deriv: return np.transpose( self._wrap_deriv(params, model, weights, x, y, z) ) else: return self._wrap_deriv(params, model, weights, x, y, z) dfunc = _dfunc init_values, _, bounds = model_to_fit_params(model) # Note, if use_min_max_bounds is True we are defaulting to enforcing bounds # using the old method employed by LevMarLSQFitter, this is different # from the method that optimize.least_squares employs to enforce bounds # thus we override the bounds being passed to optimize.least_squares so # that it will not enforce any bounding. if self._use_min_max_bounds: bounds = (-np.inf, np.inf) self.fit_info = optimize.least_squares( self.objective_function, init_values, args=farg, jac=dfunc, max_nfev=maxiter, diff_step=np.sqrt(epsilon), xtol=acc, method=self._method, bounds=bounds, ) # Adapted from ~scipy.optimize.minpack, see: # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816 # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(self.fit_info.jac, full_matrices=False) threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0] s = s[s > threshold] VT = VT[: s.size] cov_x = np.dot(VT.T / s**2, VT) fitter_to_model_params(model, self.fit_info.x, False) if not self.fit_info.success: warnings.warn( f"The fit may be unsuccessful; check: \n {self.fit_info.message}", AstropyUserWarning, ) return init_values, self.fit_info.x, cov_x class TRFLSQFitter(_NLLSQFitter): """ Trust Region Reflective algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. 
Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__("trf", calc_uncertainties, use_min_max_bounds) class DogBoxLSQFitter(_NLLSQFitter): """ DogBox algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__("dogbox", calc_uncertainties, use_min_max_bounds) class LMLSQFitter(_NLLSQFitter): """ `scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False): super().__init__("lm", calc_uncertainties, True) class SLSQPLSQFitter(Fitter): """ Sequential Least Squares Programming (SLSQP) optimization algorithm and least squares statistic. Raises ------ ModelLinearityError A linear model is passed to a nonlinear fitter Notes ----- See also the `~astropy.modeling.optimizers.SLSQP` optimizer. """ supported_constraints = SLSQP.supported_constraints def __init__(self): super().__init__(optimizer=SLSQP, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic verblevel : int 0-silent 1-print summary upon completion, 2-print summary after each iteration maxiter : int maximum number of iterations epsilon : float the step size for finite-difference derivative estimates acc : float Requested accuracy equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = ( model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs ) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class SimplexLSQFitter(Fitter): """ Simplex algorithm and least squares statistic. 
Raises ------ `ModelLinearityError` A linear model is passed to a nonlinear fitter """ supported_constraints = Simplex.supported_constraints def __init__(self): super().__init__(optimizer=Simplex, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic maxiter : int maximum number of iterations acc : float Relative error in approximate solution equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = ( model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs ) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class JointFitter(metaclass=_FitterMeta): """ Fit models which share a parameter. For example, fit two gaussians to two data sets but keep the FWHM the same. Parameters ---------- models : list a list of model instances jointparameters : list a list of joint parameters initvals : list a list of initial values """ def __init__(self, models, jointparameters, initvals): self.models = list(models) self.initvals = list(initvals) self.jointparams = jointparameters self._verify_input() self.fitparams = self.model_to_fit_params() # a list of model.n_inputs self.modeldims = [m.n_inputs for m in self.models] # sum all model dimensions self.ndim = np.sum(self.modeldims) def model_to_fit_params(self): fparams = [] fparams.extend(self.initvals) for model in self.models: params = model.parameters.tolist() joint_params = self.jointparams[model] param_metrics = model._param_metrics for param_name in joint_params: slice_ = param_metrics[param_name]["slice"] del params[slice_] fparams.extend(params) return fparams def objective_function(self, fps, *args): """ Function to minimize. 
Parameters ---------- fps : list the fitted parameters - the result of one iteration of the fitting algorithm args : tuple tuple of measured and input coordinates args is always passed as a tuple from optimize.leastsq """
lstsqargs = list(args) fitted = [] fitparams = list(fps) numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fitparams[:numjp] del fitparams[:numjp] for model in self.models: joint_params = self.jointparams[model] margs = lstsqargs[: model.n_inputs + 1] del lstsqargs[: model.n_inputs + 1] # separate out each model's individually fitted parameters numfp = len(model._parameters) - len(joint_params) mfparams = fitparams[:numfp] del fitparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the # parameter is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]["slice"] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] modelfit = model.evaluate(margs[:-1], *mparams) fitted.extend(modelfit - margs[-1]) return np.ravel(fitted)
def _verify_input(self): if len(self.models) <= 1: raise TypeError(f"Expected >1 models, got {len(self.models)}") if len(self.jointparams.keys()) < 2: raise TypeError( "At least two parameters are expected, " f"got {len(self.jointparams.keys())}" ) for j in self.jointparams.keys(): if len(self.jointparams[j]) != len(self.initvals): raise TypeError( f"{len(self.jointparams[j])} parameter(s) " f"provided but {len(self.initvals)} expected" )
def __call__(self, *args): """ Fit data to these models keeping some of the parameters common to the two models.
""" from scipy import optimize if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims): raise ValueError( f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} " f"coordinates in args but {len(args)} provided" ) self.fitparams[:], _ = optimize.leastsq( self.objective_function, self.fitparams, args=args ) fparams = self.fitparams[:] numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fparams[:numjp] del fparams[:numjp] for model in self.models: # extract each model's fitted parameters joint_params = self.jointparams[model] numfp = len(model._parameters) - len(joint_params) mfparams = fparams[:numfp] del fparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the parameter # is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]["slice"] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] model.parameters = np.array(mparams) def _convert_input(x, y, z=None, n_models=1, model_set_axis=0): """Convert inputs to float arrays.""" x = np.asanyarray(x, dtype=float) y = np.asanyarray(y, dtype=float) if z is not None: z = np.asanyarray(z, dtype=float) data_ndim, data_shape = z.ndim, z.shape else: data_ndim, data_shape = y.ndim, y.shape # For compatibility with how the linear fitter code currently expects to # work, shift the dependent variable's axes to the expected locations if n_models > 1 or data_ndim > x.ndim: if (model_set_axis or 0) >= data_ndim: raise ValueError("model_set_axis out of range") if data_shape[model_set_axis] != n_models: raise ValueError( "Number of data sets (y or z array) is expected to equal " "the number of parameter sets" ) if z is None: # For a 1-D model the y coordinate's model-set-axis is expected to # be last, so that its first dimension is the same length as the x # coordinates. This is in line with the expectations of # numpy.linalg.lstsq: # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # That is, each model should be represented by a column. TODO: # Obviously this is a detail of np.linalg.lstsq and should be # handled specifically by any fitters that use it... y = np.rollaxis(y, model_set_axis, y.ndim) data_shape = y.shape[:-1] else: # Shape of z excluding model_set_axis data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :] if z is None: if data_shape != x.shape: raise ValueError("x and y should have the same shape") farg = (x, y) else: if not (x.shape == y.shape == data_shape): raise ValueError("x, y and z should have the same shape") farg = (x, y, z) return farg # TODO: These utility functions are really particular to handling # bounds/tied/fixed constraints for scipy.optimize optimizers that do not # support them inherently; this needs to be reworked to be clear about this # distinction (and the fact that these are not necessarily applicable to any # arbitrary fitter--as evidenced for example by the fact that JointFitter has # its own versions of these) # TODO: Most of this code should be entirely rewritten; it should not be as # inefficient as it is. def fitter_to_model_params(model, fps, use_min_max_bounds=True): """ Constructs the full list of model parameters from the fitted and constrained parameters. 
Parameters ---------- model : The model being fit fps : The fit parameter values to be assigned use_min_max_bounds: bool If the set parameter bounds for model will be enforced on each parameter with bounds. Default: True """ _, fit_param_indices, _ = model_to_fit_params(model) has_tied = any(model.tied.values()) has_fixed = any(model.fixed.values()) has_bound = any(b != (None, None) for b in model.bounds.values()) parameters = model.parameters if not (has_tied or has_fixed or has_bound): # We can just assign directly model.parameters = fps return fit_param_indices = set(fit_param_indices) offset = 0 param_metrics = model._param_metrics for idx, name in enumerate(model.param_names): if idx not in fit_param_indices: continue slice_ = param_metrics[name]["slice"] shape = param_metrics[name]["shape"] # This is determining which range of fps (the fitted parameters) maps # to parameters of the model size = reduce(operator.mul, shape, 1) values = fps[offset : offset + size] # Check bounds constraints if model.bounds[name] != (None, None) and use_min_max_bounds: _min, _max = model.bounds[name] if _min is not None: values = np.fmax(values, _min) if _max is not None: values = np.fmin(values, _max) parameters[slice_] = values offset += size # Update model parameters before calling ``tied`` constraints. model._array_to_parameters() # This has to be done in a separate loop due to how tied parameters are # currently evaluated (the fitted parameters need to actually be *set* on # the model first, for use in evaluating the "tied" expression--it might be # better to change this at some point if has_tied: for idx, name in enumerate(model.param_names): if model.tied[name]: value = model.tied[name](model) slice_ = param_metrics[name]["slice"] # To handle multiple tied constraints, model parameters # need to be updated after each iteration. parameters[slice_] = value model._array_to_parameters() @deprecated("5.1", "private method: _fitter_to_model_params has been made public now") def _fitter_to_model_params(model, fps): return fitter_to_model_params(model, fps) def model_to_fit_params(model): """ Convert a model instance's parameter array to an array that can be used with a fitter that doesn't natively support fixed or tied parameters. In particular, it removes fixed/tied parameters from the parameter array. These may be a subset of the model parameters, if some of them are held constant or tied. """ fitparam_indices = list(range(len(model.param_names))) model_params = model.parameters model_bounds = list(model.bounds.values()) if any(model.fixed.values()) or any(model.tied.values()): params = list(model_params) param_metrics = model._param_metrics for idx, name in list(enumerate(model.param_names))[::-1]: if model.fixed[name] or model.tied[name]: slice_ = param_metrics[name]["slice"] del params[slice_] del model_bounds[slice_] del fitparam_indices[idx] model_params = np.array(params) for idx, bound in enumerate(model_bounds): if bound[0] is None: lower = -np.inf else: lower = bound[0] if bound[1] is None: upper = np.inf else: upper = bound[1] model_bounds[idx] = (lower, upper) model_bounds = tuple(zip(*model_bounds)) return model_params, fitparam_indices, model_bounds @deprecated("5.1", "private method: _model_to_fit_params has been made public now") def _model_to_fit_params(model): return model_to_fit_params(model) def _validate_constraints(supported_constraints, model): """Make sure model constraints are supported by the current fitter.""" message = "Optimizer cannot handle {0} constraints." 
if any(model.fixed.values()) and "fixed" not in supported_constraints: raise UnsupportedConstraintError(message.format("fixed parameter")) if any(model.tied.values()) and "tied" not in supported_constraints: raise UnsupportedConstraintError(message.format("tied parameter")) if ( any(tuple(b) != (None, None) for b in model.bounds.values()) and "bounds" not in supported_constraints ): raise UnsupportedConstraintError(message.format("bound parameter")) if model.eqcons and "eqcons" not in supported_constraints: raise UnsupportedConstraintError(message.format("equality")) if model.ineqcons and "ineqcons" not in supported_constraints: raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints): """ Check that model and fitter are compatible and return a copy of the model. """ if not model.fittable: raise ValueError("Model does not appear to be fittable.") if model.linear: warnings.warn( "Model is linear in parameters; consider using linear fitting methods.", AstropyUserWarning, ) elif len(model) != 1: # for now only single data sets can be fitted raise ValueError("Non-linear fitters can only fit one data set at a time.") _validate_constraints(supported_constraints, model) model_copy = model.copy() return model_copy
def populate_entry_points(entry_points): """ This injects entry points into the `astropy.modeling.fitting` namespace. This provides a means of inserting a fitting routine without the requirement of it being merged into astropy's core. Parameters ---------- entry_points : list of `~importlib.metadata.EntryPoint` entry_points are objects which encapsulate importable objects and are defined on the installation of a package. Notes ----- An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_ """ for entry_point in entry_points: name = entry_point.name try: entry_point = entry_point.load() except Exception as e: # This stops the fitting from choking if an entry_point produces an error. warnings.warn( AstropyUserWarning( f"{type(e).__name__} error occurred in entry point {name}." ) ) else: if not inspect.isclass(entry_point): warnings.warn( AstropyUserWarning( f"Modeling entry point {name} expected to be a class." ) ) else: if issubclass(entry_point, Fitter): name = entry_point.__name__ globals()[name] = entry_point __all__.append(name) else: warnings.warn( AstropyUserWarning( f"Modeling entry point {name} expected to extend " "astropy.modeling.Fitter" ) )
def _populate_ep(): # TODO: Exclusively use select when Python minversion is 3.10 ep = entry_points() if hasattr(ep, "select"): populate_entry_points(ep.select(group="astropy.modeling")) else: populate_entry_points(ep.get("astropy.modeling", [])) _populate_ep()
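# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition; not part of the module API).
# It shows how the non-linear fitters defined above are typically driven. The
# Gaussian1D model, the synthetic data and the noise level below are
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    from astropy.modeling import models

    rng = np.random.default_rng(0)
    x = np.linspace(-5.0, 5.0, 200)
    truth = models.Gaussian1D(amplitude=3.0, mean=0.4, stddev=1.1)
    y = truth(x) + rng.normal(0.0, 0.1, x.size)

    # TRFLSQFitter wraps scipy.optimize.least_squares(method="trf") and
    # supports parameter bounds; LevMarLSQFitter wraps scipy.optimize.leastsq.
    fitter = TRFLSQFitter(calc_uncertainties=True)
    fitted = fitter(models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0), x, y)

    print(fitted.parameters)  # best-fit amplitude, mean, stddev
    print(fitter.fit_info)    # scipy.optimize.OptimizeResult for this fit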
cb34baffbc7c3e1341d9bce63108f27230d21ce5e0b57a597206ac31ad400535
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name """ This module defines classes that deal with parameters. It is unlikely users will need to work with these classes directly, unless they define their own models. """ import functools import numbers import operator import numpy as np from astropy.units import MagUnit, Quantity from astropy.utils import isiterable from .utils import array_repr_oneline, get_inputs_and_params __all__ = ["Parameter", "InputParameterError", "ParameterError"] class ParameterError(Exception): """Generic exception class for all exceptions pertaining to Parameters.""" class InputParameterError(ValueError, ParameterError): """Used for incorrect input parameter values and definitions.""" class ParameterDefinitionError(ParameterError): """Exception in declaration of class-level Parameters.""" def _tofloat(value): """Convert a parameter to float or float array""" if isiterable(value): try: value = np.asanyarray(value, dtype=float) except (TypeError, ValueError): # catch arrays with strings or user errors like different # types of parameters in a parameter set raise InputParameterError( f"Parameter of {type(value)} could not be converted to float" ) elif isinstance(value, Quantity): # Quantities are fine as is pass elif isinstance(value, np.ndarray): # A scalar/dimensionless array value = float(value.item()) elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool): value = float(value) elif isinstance(value, bool): raise InputParameterError( "Expected parameter to be of numerical type, not boolean" ) else: raise InputParameterError( f"Don't know how to convert parameter of {type(value)} to float" ) return value # Helpers for implementing operator overloading on Parameter def _binary_arithmetic_operation(op, reflected=False): @functools.wraps(op) def wrapper(self, val): if self.unit is not None: self_value = Quantity(self.value, self.unit) else: self_value = self.value if reflected: return op(val, self_value) else: return op(self_value, val) return wrapper def _binary_comparison_operation(op): @functools.wraps(op) def wrapper(self, val): if self.unit is not None: self_value = Quantity(self.value, self.unit) else: self_value = self.value return op(self_value, val) return wrapper def _unary_arithmetic_operation(op): @functools.wraps(op) def wrapper(self): if self.unit is not None: self_value = Quantity(self.value, self.unit) else: self_value = self.value return op(self_value) return wrapper class Parameter: """ Wraps individual parameters. Since 4.0 Parameters are no longer descriptors and are based on a new implementation of the Parameter class. Parameters now (as of 4.0) store values locally (as instead previously in the associated model) This class represents a model's parameter (in a somewhat broad sense). It serves a number of purposes: 1) A type to be recognized by models and treated specially at class initialization (i.e., if it is found that there is a class definition of a Parameter, the model initializer makes a copy at the instance level). 2) Managing the handling of allowable parameter values and once defined, ensuring updates are consistent with the Parameter definition. This includes the optional use of units and quantities as well as transforming values to an internally consistent representation (e.g., from degrees to radians through the use of getters and setters). 
3) Holding attributes of parameters relevant to fitting, such as whether the parameter may be varied in fitting, or whether there are constraints that must be satisfied. See :ref:`astropy:modeling-parameters` for more details. Parameters ---------- name : str parameter name .. warning:: The fact that `Parameter` accepts ``name`` as an argument is an implementation detail, and should not be used directly. When defining a new `Model` class, parameter names are always automatically defined by the class attribute they're assigned to. description : str parameter description default : float or array default value to use for this parameter unit : `~astropy.units.Unit` if specified, the parameter will be in these units, and when the parameter is updated in future, it should be set to a :class:`~astropy.units.Quantity` that has equivalent units. getter : callable a function that wraps the raw (internal) value of the parameter when returning the value through the parameter proxy (eg. a parameter may be stored internally as radians but returned to the user as degrees) setter : callable a function that wraps any values assigned to this parameter; should be the inverse of getter fixed : bool if True the parameter is not varied during fitting tied : callable or False if callable is supplied it provides a way to link the value of this parameter to another parameter (or some other arbitrary function) min : float the lower bound of a parameter max : float the upper bound of a parameter bounds : tuple specify min and max as a single tuple--bounds may not be specified simultaneously with min or max mag : bool Specify if the unit of the parameter can be a Magnitude unit or not """ constraints = ("fixed", "tied", "bounds") """ Types of constraints a parameter can have. Excludes 'min' and 'max' which are just aliases for the first and second elements of the 'bounds' constraint (which is represented as a 2-tuple). 'prior' and 'posterior' are available for use by user fitters but are not used by any built-in fitters as of this writing. """ def __init__( self, name="", description="", default=None, unit=None, getter=None, setter=None, fixed=False, tied=False, min=None, max=None, bounds=None, prior=None, posterior=None, mag=False, ): super().__init__() self._model = None self._model_required = False self._setter = self._create_value_wrapper(setter, None) self._getter = self._create_value_wrapper(getter, None) self._name = name self.__doc__ = self._description = description.strip() # We only need to perform this check on unbound parameters if isinstance(default, Quantity): if unit is not None and not unit.is_equivalent(default.unit): raise ParameterDefinitionError( f"parameter default {default} does not have units equivalent to " f"the required unit {unit}" ) unit = default.unit default = default.value self._default = default self._mag = mag self._set_unit(unit, force=True) # Internal units correspond to raw_units held by the model in the # previous implementation. The private _getter and _setter methods # use this to convert to and from the public unit defined for the # parameter. 
self._internal_unit = None if not self._model_required: if self._default is not None: self.value = self._default else: self._value = None # NOTE: These are *default* constraints--on model instances constraints # are taken from the model if set, otherwise the defaults set here are # used if bounds is not None: if min is not None or max is not None: raise ValueError( "bounds may not be specified simultaneously with min or " f"max when instantiating Parameter {name}" ) else: bounds = (min, max) self._fixed = fixed self._tied = tied self._bounds = bounds self._order = None self._validator = None self._prior = prior self._posterior = posterior self._std = None def __set_name__(self, owner, name): self._name = name def __len__(self): val = self.value if val.shape == (): return 1 else: return val.shape[0] def __getitem__(self, key): value = self.value if len(value.shape) == 0: # Wrap the value in a list so that getitem can work for sensible # indices like [0] and [-1] value = [value] return value[key] def __setitem__(self, key, value): # Get the existing value and check whether it even makes sense to # apply this index oldvalue = self.value if isinstance(key, slice): if len(oldvalue[key]) == 0: raise InputParameterError( "Slice assignment outside the parameter dimensions for " f"'{self.name}'" ) for idx, val in zip(range(*key.indices(len(self))), value): self.__setitem__(idx, val) else: try: oldvalue[key] = value except IndexError: raise InputParameterError( f"Input dimension {key} invalid for {self.name!r} parameter with " f"dimension {value.shape[0]}" ) # likely wrong def __repr__(self): args = f"'{self._name}'" args += f", value={self.value}" if self.unit is not None: args += f", unit={self.unit}" for cons in self.constraints: val = getattr(self, cons) if val not in (None, False, (None, None)): # Maybe non-obvious, but False is the default for the fixed and # tied constraints args += f", {cons}={val}" return f"{self.__class__.__name__}({args})" @property def name(self): """Parameter name""" return self._name @property def default(self): """Parameter default value""" return self._default @property def value(self): """The unadorned value proxied by this parameter.""" if self._getter is None and self._setter is None: return np.float64(self._value) else: # This new implementation uses the names of internal_unit # in place of raw_unit used previously. The contrast between # internal values and units is that between the public # units that the parameter advertises to what it actually # uses internally. if self.internal_unit: return np.float64( self._getter( self._internal_value, self.internal_unit, self.unit ).value ) elif self._getter: return np.float64(self._getter(self._internal_value)) elif self._setter: return np.float64(self._internal_value) @value.setter def value(self, value): if isinstance(value, Quantity): raise TypeError( "The .value property on parameters should be set" " to unitless values, not Quantity objects. To set" "a parameter to a quantity simply set the " "parameter directly without using .value" ) if self._setter is None: self._value = np.array(value, dtype=np.float64) else: self._internal_value = np.array(self._setter(value), dtype=np.float64) @property def unit(self): """ The unit attached to this parameter, if any. On unbound parameters (i.e. parameters accessed through the model class, rather than a model instance) this is the required/ default unit for the parameter. 
""" return self._unit @unit.setter def unit(self, unit): if self.unit is None: raise ValueError( "Cannot attach units to parameters that were " "not initially specified with units" ) else: raise ValueError( "Cannot change the unit attribute directly, " "instead change the parameter to a new quantity" ) def _set_unit(self, unit, force=False): if force: if isinstance(unit, MagUnit) and not self._mag: raise ValueError( "This parameter does not support the magnitude units such as" f" {unit}" ) self._unit = unit else: self.unit = unit @property def internal_unit(self): """ Return the internal unit the parameter uses for the internal value stored """ return self._internal_unit @internal_unit.setter def internal_unit(self, internal_unit): """ Set the unit the parameter will convert the supplied value to the representation used internally. """ self._internal_unit = internal_unit @property def quantity(self): """ This parameter, as a :class:`~astropy.units.Quantity` instance. """ if self.unit is None: return None return self.value * self.unit @quantity.setter def quantity(self, quantity): if not isinstance(quantity, Quantity): raise TypeError( "The .quantity attribute should be set to a Quantity object" ) self.value = quantity.value self._set_unit(quantity.unit, force=True) @property def shape(self): """The shape of this parameter's value array.""" if self._setter is None: return self._value.shape return self._internal_value.shape @shape.setter def shape(self, value): if isinstance(self.value, np.generic): if value not in ((), (1,)): raise ValueError("Cannot assign this shape to a scalar quantity") else: self.value.shape = value @property def size(self): """The size of this parameter's value array.""" return np.size(self.value) @property def std(self): """Standard deviation, if available from fit.""" return self._std @std.setter def std(self, value): self._std = value @property def prior(self): return self._prior @prior.setter def prior(self, val): self._prior = val @property def posterior(self): return self._posterior @posterior.setter def posterior(self, val): self._posterior = val @property def fixed(self): """ Boolean indicating if the parameter is kept fixed during fitting. """ return self._fixed @fixed.setter def fixed(self, value): """Fix a parameter.""" if not isinstance(value, bool): raise ValueError("Value must be boolean") self._fixed = value @property def tied(self): """ Indicates that this parameter is linked to another one. A callable which provides the relationship of the two parameters. 
""" return self._tied @tied.setter def tied(self, value): """Tie a parameter""" if not callable(value) and value not in (False, None): raise TypeError("Tied must be a callable or set to False or None") self._tied = value @property def bounds(self): """The minimum and maximum values of a parameter as a tuple""" return self._bounds @bounds.setter def bounds(self, value): """Set the minimum and maximum values of a parameter from a tuple""" _min, _max = value if _min is not None: if not isinstance(_min, (numbers.Number, Quantity)): raise TypeError("Min value must be a number or a Quantity") if isinstance(_min, Quantity): _min = float(_min.value) else: _min = float(_min) if _max is not None: if not isinstance(_max, (numbers.Number, Quantity)): raise TypeError("Max value must be a number or a Quantity") if isinstance(_max, Quantity): _max = float(_max.value) else: _max = float(_max) self._bounds = (_min, _max) @property def min(self): """A value used as a lower bound when fitting a parameter""" return self.bounds[0] @min.setter def min(self, value): """Set a minimum value of a parameter""" self.bounds = (value, self.max) @property def max(self): """A value used as an upper bound when fitting a parameter""" return self.bounds[1] @max.setter def max(self, value): """Set a maximum value of a parameter.""" self.bounds = (self.min, value) @property def validator(self): """ Used as a decorator to set the validator method for a `Parameter`. The validator method validates any value set for that parameter. It takes two arguments--``self``, which refers to the `Model` instance (remember, this is a method defined on a `Model`), and the value being set for this parameter. The validator method's return value is ignored, but it may raise an exception if the value set on the parameter is invalid (typically an `InputParameterError` should be raised, though this is not currently a requirement). """ def validator(func, self=self): if callable(func): self._validator = func return self else: raise ValueError( "This decorator method expects a callable.\n" "The use of this method as a direct validator is\n" "deprecated; use the new validate method instead\n" ) return validator def validate(self, value): """Run the validator on this parameter""" if self._validator is not None and self._model is not None: self._validator(self._model, value) def copy( self, name=None, description=None, default=None, unit=None, getter=None, setter=None, fixed=False, tied=False, min=None, max=None, bounds=None, prior=None, posterior=None, ): """ Make a copy of this `Parameter`, overriding any of its core attributes in the process (or an exact copy). The arguments to this method are the same as those for the `Parameter` initializer. This simply returns a new `Parameter` instance with any or all of the attributes overridden, and so returns the equivalent of: .. code:: python Parameter(self.name, self.description, ...) 
""" kwargs = locals().copy() del kwargs["self"] for key, value in kwargs.items(): if value is None: # Annoying special cases for min/max where are just aliases for # the components of bounds if key in ("min", "max"): continue else: if hasattr(self, key): value = getattr(self, key) elif hasattr(self, "_" + key): value = getattr(self, "_" + key) kwargs[key] = value return self.__class__(**kwargs) @property def model(self): """Return the model this parameter is associated with.""" return self._model @model.setter def model(self, value): self._model = value self._setter = self._create_value_wrapper(self._setter, value) self._getter = self._create_value_wrapper(self._getter, value) if self._model_required: if self._default is not None: self.value = self._default else: self._value = None @property def _raw_value(self): """ Currently for internal use only. Like Parameter.value but does not pass the result through Parameter.getter. By design this should only be used from bound parameters. This will probably be removed are retweaked at some point in the process of rethinking how parameter values are stored/updated. """ if self._setter: return self._internal_value return self.value def _create_value_wrapper(self, wrapper, model): """Wraps a getter/setter function to support optionally passing in a reference to the model object as the second argument. If a model is tied to this parameter and its getter/setter supports a second argument then this creates a partial function using the model instance as the second argument. """ if isinstance(wrapper, np.ufunc): if wrapper.nin != 1: raise TypeError( "A numpy.ufunc used for Parameter " "getter/setter may only take one input " "argument" ) elif wrapper is None: # Just allow non-wrappers to fall through silently, for convenience return None else: inputs, _ = get_inputs_and_params(wrapper) nargs = len(inputs) if nargs == 1: pass elif nargs == 2: self._model_required = True if model is not None: # Don't make a partial function unless we're tied to a # specific model instance model_arg = inputs[1].name wrapper = functools.partial(wrapper, **{model_arg: model}) else: raise TypeError( "Parameter getter/setter must be a function " "of either one or two arguments" ) return wrapper def __array__(self, dtype=None): # Make np.asarray(self) work a little more straightforwardly arr = np.asarray(self.value, dtype=dtype) if self.unit is not None: arr = Quantity(arr, self.unit, copy=False, subok=True) return arr def __bool__(self): return bool(np.all(self.value)) __add__ = _binary_arithmetic_operation(operator.add) __radd__ = _binary_arithmetic_operation(operator.add, reflected=True) __sub__ = _binary_arithmetic_operation(operator.sub) __rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True) __mul__ = _binary_arithmetic_operation(operator.mul) __rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True) __pow__ = _binary_arithmetic_operation(operator.pow) __rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True) __truediv__ = _binary_arithmetic_operation(operator.truediv) __rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True) __eq__ = _binary_comparison_operation(operator.eq) __ne__ = _binary_comparison_operation(operator.ne) __lt__ = _binary_comparison_operation(operator.lt) __gt__ = _binary_comparison_operation(operator.gt) __le__ = _binary_comparison_operation(operator.le) __ge__ = _binary_comparison_operation(operator.ge) __neg__ = _unary_arithmetic_operation(operator.neg) __abs__ = 
_unary_arithmetic_operation(operator.abs) def param_repr_oneline(param): """ Like array_repr_oneline but works on `Parameter` objects and supports rendering parameters with units like quantities. """ out = array_repr_oneline(param.value) if param.unit is not None: out = f"{out} {param.unit!s}" return out
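# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition; not part of the module API).
# It demonstrates how a Parameter bound to a model instance exposes values,
# quantities and constraints. The Gaussian1D model and the units used are
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    from astropy import units as u
    from astropy.modeling import models

    g = models.Gaussian1D(amplitude=2.0 * u.Jy, mean=1.0 * u.um, stddev=0.1 * u.um)

    # .value is always the plain number; .quantity re-attaches the unit.
    print(g.amplitude.value)     # 2.0
    print(g.amplitude.quantity)  # 2.0 Jy

    # Constraints are set directly on the bound parameter. Bounds may be given
    # as Quantities, but only their numeric values are stored (no unit
    # conversion is applied), so they should match the parameter's unit.
    g.stddev.fixed = True
    g.mean.bounds = (0.5 * u.um, 2.0 * u.um)
    print(repr(g.mean))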
ea153ac685b17c9c92714cd974026edcbaf85015949df4e4f9c94f83344620b7
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines base classes for all models. The base class of all models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is the base class for all fittable models. Fittable models can be linear or nonlinear in a regression analysis sense. All models provide a `__call__` method which performs the transformation in a purely mathematical way, i.e. the models are unitless. Model instances can represent either a single model, or a "model set" representing multiple copies of the same type of model, but with potentially different values of the parameters in each model making up the set. """ # pylint: disable=invalid-name, protected-access, redefined-outer-name import abc import copy import functools import inspect import itertools import operator import types from collections import defaultdict, deque from inspect import signature from itertools import chain import numpy as np from astropy.nddata.utils import add_array, extract_array from astropy.table import Table from astropy.units import Quantity, UnitsError, dimensionless_unscaled from astropy.units.utils import quantity_asanyarray from astropy.utils import ( IncompatibleShapeError, check_broadcast, find_current_module, indent, isiterable, metadata, sharedmethod, ) from astropy.utils.codegen import make_function_with_signature from .bounding_box import CompoundBoundingBox, ModelBoundingBox from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline from .utils import ( _combine_equivalency_dict, _ConstraintsDict, _SpecialOperatorsDict, combine_labels, get_inputs_and_params, make_binary_operator_eval, ) __all__ = [ "Model", "FittableModel", "Fittable1DModel", "Fittable2DModel", "CompoundModel", "fix_inputs", "custom_model", "ModelDefinitionError", "bind_bounding_box", "bind_compound_bounding_box", ] def _model_oper(oper, **kwargs): """ Returns a function that evaluates a given Python arithmetic operator between two models. The operator should be given as a string, like ``'+'`` or ``'**'``. """ return lambda left, right: CompoundModel(oper, left, right, **kwargs) class ModelDefinitionError(TypeError): """Used for incorrect models definitions.""" class _ModelMeta(abc.ABCMeta): """ Metaclass for Model. Currently just handles auto-generating the param_names list based on Parameter descriptors declared at the class-level of Model subclasses. """ _is_dynamic = False """ This flag signifies whether this class was created in the "normal" way, with a class statement in the body of a module, as opposed to a call to `type` or some other metaclass constructor, such that the resulting class does not belong to a specific module. This is important for pickling of dynamic classes. This flag is always forced to False for new classes, so code that creates dynamic classes should manually set it to True on those classes when creating them. 
""" # Default empty dict for _parameters_, which will be empty on model # classes that don't have any Parameters def __new__(mcls, name, bases, members, **kwds): # See the docstring for _is_dynamic above if "_is_dynamic" not in members: members["_is_dynamic"] = mcls._is_dynamic opermethods = [ ("__add__", _model_oper("+")), ("__sub__", _model_oper("-")), ("__mul__", _model_oper("*")), ("__truediv__", _model_oper("/")), ("__pow__", _model_oper("**")), ("__or__", _model_oper("|")), ("__and__", _model_oper("&")), ("_fix_inputs", _model_oper("fix_inputs")), ] members["_parameters_"] = { k: v for k, v in members.items() if isinstance(v, Parameter) } for opermethod, opercall in opermethods: members[opermethod] = opercall cls = super().__new__(mcls, name, bases, members, **kwds) param_names = list(members["_parameters_"]) # Need to walk each base MRO to collect all parameter names for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): # Preserve order of definitions param_names = list(tbase._parameters_) + param_names # Remove duplicates (arising from redefinition in subclass). param_names = list(dict.fromkeys(param_names)) if cls._parameters_: if hasattr(cls, "_param_names"): # Slight kludge to support compound models, where # cls.param_names is a property; could be improved with a # little refactoring but fine for now cls._param_names = tuple(param_names) else: cls.param_names = tuple(param_names) return cls def __init__(cls, name, bases, members, **kwds): super().__init__(name, bases, members, **kwds) cls._create_inverse_property(members) cls._create_bounding_box_property(members) pdict = {} for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): for parname, val in cls._parameters_.items(): pdict[parname] = val cls._handle_special_methods(members, pdict) def __repr__(cls): """ Custom repr for Model subclasses. """ return cls._format_cls_repr() def _repr_pretty_(cls, p, cycle): """ Repr for IPython's pretty printer. By default IPython "pretty prints" classes, so we need to implement this so that IPython displays the custom repr for Models. """ p.text(repr(cls)) def __reduce__(cls): if not cls._is_dynamic: # Just return a string specifying where the class can be imported # from return cls.__name__ members = dict(cls.__dict__) # Delete any ABC-related attributes--these will be restored when # the class is reconstructed: for key in list(members): if key.startswith("_abc_"): del members[key] # Delete custom __init__ and __call__ if they exist: for key in ("__init__", "__call__"): if key in members: del members[key] return (type(cls), (cls.__name__, cls.__bases__, members)) @property def name(cls): """ The name of this model class--equivalent to ``cls.__name__``. This attribute is provided for symmetry with the `Model.name` attribute of model instances. """ return cls.__name__ @property def _is_concrete(cls): """ A class-level property that determines whether the class is a concrete implementation of a Model--i.e. it is not some abstract base class or internal implementation detail (i.e. begins with '_'). """ return not (cls.__name__.startswith("_") or inspect.isabstract(cls)) def rename(cls, name=None, inputs=None, outputs=None): """ Creates a copy of this model class with a new name, inputs or outputs. The new class is technically a subclass of the original class, so that instance and type checks will still work. 
For example:: >>> from astropy.modeling.models import Rotation2D >>> SkyRotation = Rotation2D.rename('SkyRotation') >>> SkyRotation <class 'astropy.modeling.core.SkyRotation'> Name: SkyRotation (Rotation2D) N_inputs: 2 N_outputs: 2 Fittable parameters: ('angle',) >>> issubclass(SkyRotation, Rotation2D) True >>> r = SkyRotation(90) >>> isinstance(r, Rotation2D) True """ mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = "__main__" if name is None: name = cls.name if inputs is None: inputs = cls.inputs else: if not isinstance(inputs, tuple): raise TypeError("Expected 'inputs' to be a tuple of strings.") elif len(inputs) != len(cls.inputs): raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs") if outputs is None: outputs = cls.outputs else: if not isinstance(outputs, tuple): raise TypeError("Expected 'outputs' to be a tuple of strings.") elif len(outputs) != len(cls.outputs): raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs") new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs}) new_cls.__module__ = modname new_cls.__qualname__ = name return new_cls def _create_inverse_property(cls, members): inverse = members.get("inverse") if inverse is None or cls.__bases__[0] is object: # The latter clause is the prevent the below code from running on # the Model base class, which implements the default getter and # setter for .inverse return if isinstance(inverse, property): # We allow the @property decorator to be omitted entirely from # the class definition, though its use should be encouraged for # clarity inverse = inverse.fget # Store the inverse getter internally, then delete the given .inverse # attribute so that cls.inverse resolves to Model.inverse instead cls._inverse = inverse del cls.inverse def _create_bounding_box_property(cls, members): """ Takes any bounding_box defined on a concrete Model subclass (either as a fixed tuple or a property or method) and wraps it in the generic getter/setter interface for the bounding_box attribute. """ # TODO: Much of this is verbatim from _create_inverse_property--I feel # like there could be a way to generify properties that work this way, # but for the time being that would probably only confuse things more. bounding_box = members.get("bounding_box") if bounding_box is None or cls.__bases__[0] is object: return if isinstance(bounding_box, property): bounding_box = bounding_box.fget if not callable(bounding_box): # See if it's a hard-coded bounding_box (as a sequence) and # normalize it try: bounding_box = ModelBoundingBox.validate( cls, bounding_box, _preserve_ignore=True ) except ValueError as exc: raise ModelDefinitionError(exc.args[0]) else: sig = signature(bounding_box) # May be a method that only takes 'self' as an argument (like a # property, but the @property decorator was forgotten) # # However, if the method takes additional arguments then this is a # parameterized bounding box and should be callable if len(sig.parameters) > 1: bounding_box = cls._create_bounding_box_subclass(bounding_box, sig) # See the Model.bounding_box getter definition for how this attribute # is used cls._bounding_box = bounding_box del cls.bounding_box def _create_bounding_box_subclass(cls, func, sig): """ For Models that take optional arguments for defining their bounding box, we create a subclass of ModelBoundingBox with a ``__call__`` method that supports those additional arguments. 
Takes the function's Signature as an argument since that is already computed in _create_bounding_box_property, so no need to duplicate that effort. """ # TODO: Might be convenient if calling the bounding box also # automatically sets the _user_bounding_box. So that # # >>> model.bounding_box(arg=1) # # in addition to returning the computed bbox, also sets it, so that # it's a shortcut for # # >>> model.bounding_box = model.bounding_box(arg=1) # # Not sure if that would be non-obvious / confusing though... def __call__(self, **kwargs): return func(self._model, **kwargs) kwargs = [] for idx, param in enumerate(sig.parameters.values()): if idx == 0: # Presumed to be a 'self' argument continue if param.default is param.empty: raise ModelDefinitionError( f"The bounding_box method for {cls.name} is not correctly " "defined: If defined as a method all arguments to that " "method (besides self) must be keyword arguments with " "default values that can be used to compute a default " "bounding box." ) kwargs.append((param.name, param.default)) __call__.__signature__ = sig return type( f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__} ) def _handle_special_methods(cls, members, pdict): # Handle init creation from inputs def update_wrapper(wrapper, cls): # Set up the new __call__'s metadata attributes as though it were # manually defined in the class definition # A bit like functools.update_wrapper but uses the class instead of # the wrapped function wrapper.__module__ = cls.__module__ wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__ if hasattr(cls, "__qualname__"): wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}" if ( "__call__" not in members and "n_inputs" in members and isinstance(members["n_inputs"], int) and members["n_inputs"] > 0 ): # Don't create a custom __call__ for classes that already have one # explicitly defined (this includes the Model base class, and any # other classes that manually override __call__ def __call__(self, *inputs, **kwargs): """Evaluate this model on the supplied inputs.""" return super(cls, self).__call__(*inputs, **kwargs) # When called, models can take two optional keyword arguments: # # * model_set_axis, which indicates (for multi-dimensional input) # which axis is used to indicate different models # # * equivalencies, a dictionary of equivalencies to be applied to # the input values, where each key should correspond to one of # the inputs. # # The following code creates the __call__ function with these # two keyword arguments. args = ("self",) kwargs = { "model_set_axis": None, "with_bounding_box": False, "fill_value": np.nan, "equivalencies": None, "inputs_map": None, } new_call = make_function_with_signature( __call__, args, kwargs, varargs="inputs", varkwargs="new_inputs" ) # The following makes it look like __call__ # was defined in the class update_wrapper(new_call, cls) cls.__call__ = new_call if ( "__init__" not in members and not inspect.isabstract(cls) and cls._parameters_ ): # Build list of all parameters including inherited ones # If *all* the parameters have default values we can make them # keyword arguments; otherwise they must all be positional # arguments if all(p.default is not None for p in pdict.values()): args = ("self",) kwargs = [] for param_name, param_val in pdict.items(): default = param_val.default unit = param_val.unit # If the unit was specified in the parameter but the # default is not a Quantity, attach the unit to the # default. 
if unit is not None: default = Quantity(default, unit, copy=False, subok=True) kwargs.append((param_name, default)) else: args = ("self",) + tuple(pdict.keys()) kwargs = {} def __init__(self, *params, **kwargs): return super(cls, self).__init__(*params, **kwargs) new_init = make_function_with_signature( __init__, args, kwargs, varkwargs="kwargs" ) update_wrapper(new_init, cls) cls.__init__ = new_init # *** Arithmetic operators for creating compound models *** __add__ = _model_oper("+") __sub__ = _model_oper("-") __mul__ = _model_oper("*") __truediv__ = _model_oper("/") __pow__ = _model_oper("**") __or__ = _model_oper("|") __and__ = _model_oper("&") _fix_inputs = _model_oper("fix_inputs") # *** Other utilities *** def _format_cls_repr(cls, keywords=[]): """ Internal implementation of ``__repr__``. This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ # For the sake of familiarity start the output with the standard class # __repr__ parts = [super().__repr__()] if not cls._is_concrete: return parts[0] def format_inheritance(cls): bases = [] for base in cls.mro()[1:]: if not issubclass(base, Model): continue elif inspect.isabstract(base) or base.__name__.startswith("_"): break bases.append(base.name) if bases: return f"{cls.name} ({' -> '.join(bases)})" return cls.name try: default_keywords = [ ("Name", format_inheritance(cls)), ("N_inputs", cls.n_inputs), ("N_outputs", cls.n_outputs), ] if cls.param_names: default_keywords.append(("Fittable parameters", cls.param_names)) for keyword, value in default_keywords + keywords: if value is not None: parts.append(f"{keyword}: {value}") return "\n".join(parts) except Exception: # If any of the above formatting fails fall back on the basic repr # (this is particularly useful in debugging) return parts[0] class Model(metaclass=_ModelMeta): """ Base class for all models. This is an abstract class and should not be instantiated directly. The following initialization arguments apply to the majority of Model subclasses by default (exceptions include specialized utility models like `~astropy.modeling.mappings.Mapping`). Parametric models take all their parameters as arguments, followed by any of the following optional keyword arguments: Parameters ---------- name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict, optional An optional dict of user-defined metadata to attach to this model. How this is used and interpreted is up to the user or individual use case. n_models : int, optional If given an integer greater than 1, a *model set* is instantiated instead of a single model. This affects how the parameter arguments are interpreted. In this case each parameter must be given as a list or array--elements of this array are taken along the first axis (or ``model_set_axis`` if specified), such that the Nth element is the value of that parameter for the Nth model in the set. See the section on model sets in the documentation for more details. model_set_axis : int, optional This argument only applies when creating a model set (i.e. ``n_models > 1``). It changes how parameter values are interpreted. Normally the first axis of each input parameter array (properly the 0th axis) is taken as the axis corresponding to the model sets. However, any axis of an input array may be taken as this "model set axis". 
This accepts negative integers as well--for example use ``model_set_axis=-1`` if the last (most rapidly changing) axis should be associated with the model sets. Also, ``model_set_axis=False`` can be used to tell that a given input should be used to evaluate all the models in the model set. fixed : dict, optional Dictionary ``{parameter_name: bool}`` setting the fixed constraint for one or more parameters. `True` means the parameter is held fixed during fitting and is prevented from updates once an instance of the model has been created. Alternatively the `~astropy.modeling.Parameter.fixed` property of a parameter may be used to lock or unlock individual parameters. tied : dict, optional Dictionary ``{parameter_name: callable}`` of parameters which are linked to some other parameter. The dictionary values are callables providing the linking relationship. Alternatively the `~astropy.modeling.Parameter.tied` property of a parameter may be used to set the ``tied`` constraint on individual parameters. bounds : dict, optional A dictionary ``{parameter_name: value}`` of lower and upper bounds of parameters. Keys are parameter names. Values are a list or a tuple of length 2 giving the desired range for the parameter. Alternatively the `~astropy.modeling.Parameter.min` and `~astropy.modeling.Parameter.max` or ~astropy.modeling.Parameter.bounds` properties of a parameter may be used to set bounds on individual parameters. eqcons : list, optional List of functions of length n such that ``eqcons[j](x0, *args) == 0.0`` in a successfully optimized problem. ineqcons : list, optional List of functions of length n such that ``ieqcons[j](x0, *args) >= 0.0`` is a successfully optimized problem. Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that ``'mean'`` is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True """ parameter_constraints = Parameter.constraints """ Primarily for informational purposes, these are the types of constraints that can be set on a model's parameters. """ model_constraints = ("eqcons", "ineqcons") """ Primarily for informational purposes, these are the types of constraints that constrain model evaluation. """ param_names = () """ Names of the parameters that describe models of this type. The parameters in this tuple are in the same order they should be passed in when initializing a model of a specific type. Some types of models, such as polynomial models, have a different number of parameters depending on some other property of the model, such as the degree. When defining a custom model class the value of this attribute is automatically set by the `~astropy.modeling.Parameter` attributes defined in the class body. 
""" n_inputs = 0 """The number of inputs.""" n_outputs = 0 """ The number of outputs.""" standard_broadcasting = True fittable = False linear = True _separable = None """ A boolean flag to indicate whether a model is separable.""" meta = metadata.MetaData() """A dict-like object to store optional information.""" # By default models either use their own inverse property or have no # inverse at all, but users may also assign a custom inverse to a model, # optionally; in that case it is of course up to the user to determine # whether their inverse is *actually* an inverse to the model they assign # it to. _inverse = None _user_inverse = None _bounding_box = None _user_bounding_box = None _has_inverse_bounding_box = False # Default n_models attribute, so that __len__ is still defined even when a # model hasn't completed initialization yet _n_models = 1 # New classes can set this as a boolean value. # It is converted to a dictionary mapping input name to a boolean value. _input_units_strict = False # Allow dimensionless input (and corresponding output). If this is True, # input values to evaluate will gain the units specified in input_units. If # this is a dictionary then it should map input name to a bool to allow # dimensionless numbers for that input. # Only has an effect if input_units is defined. _input_units_allow_dimensionless = False # Default equivalencies to apply to input values. If set, this should be a # dictionary where each key is a string that corresponds to one of the # model inputs. Only has an effect if input_units is defined. input_units_equivalencies = None # Covariance matrix can be set by fitter if available. # If cov_matrix is available, then std will set as well _cov_matrix = None _stds = None def __init_subclass__(cls, **kwargs): super().__init_subclass__() def __init__(self, *args, meta=None, name=None, **kwargs): super().__init__() self._default_inputs_outputs() if meta is not None: self.meta = meta self._name = name # add parameters to instance level by walking MRO list mro = self.__class__.__mro__ for cls in mro: if issubclass(cls, Model): for parname, val in cls._parameters_.items(): newpar = copy.deepcopy(val) newpar.model = self if parname not in self.__dict__: self.__dict__[parname] = newpar self._initialize_constraints(kwargs) kwargs = self._initialize_setters(kwargs) # Remaining keyword args are either parameter values or invalid # Parameter values must be passed in as keyword arguments in order to # distinguish them self._initialize_parameters(args, kwargs) self._initialize_slices() self._initialize_unit_support() def _default_inputs_outputs(self): if self.n_inputs == 1 and self.n_outputs == 1: self._inputs = ("x",) self._outputs = ("y",) elif self.n_inputs == 2 and self.n_outputs == 1: self._inputs = ("x", "y") self._outputs = ("z",) else: try: self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs)) self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs)) except TypeError: # self.n_inputs and self.n_outputs are properties # This is the case when subclasses of Model do not define # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``. self._inputs = () self._outputs = () def _initialize_setters(self, kwargs): """ This exists to inject defaults for settable properties for models originating from `custom_model`. 
""" if hasattr(self, "_settable_properties"): setters = { name: kwargs.pop(name, default) for name, default in self._settable_properties.items() } for name, value in setters.items(): setattr(self, name, value) return kwargs @property def inputs(self): return self._inputs @inputs.setter def inputs(self, val): if len(val) != self.n_inputs: raise ValueError( f"Expected {self.n_inputs} number of inputs, got {len(val)}." ) self._inputs = val self._initialize_unit_support() @property def outputs(self): return self._outputs @outputs.setter def outputs(self, val): if len(val) != self.n_outputs: raise ValueError( f"Expected {self.n_outputs} number of outputs, got {len(val)}." ) self._outputs = val @property def n_inputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``inputs`` as class variables is removed. if hasattr(self.__class__, "n_inputs") and isinstance( self.__class__.n_inputs, property ): try: return len(self.__class__.inputs) except TypeError: try: return len(self.inputs) except AttributeError: return 0 return self.__class__.n_inputs @property def n_outputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``outputs`` as class variables is removed. if hasattr(self.__class__, "n_outputs") and isinstance( self.__class__.n_outputs, property ): try: return len(self.__class__.outputs) except TypeError: try: return len(self.outputs) except AttributeError: return 0 return self.__class__.n_outputs def _calculate_separability_matrix(self): """ This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used. """ return NotImplemented def _initialize_unit_support(self): """ Convert self._input_units_strict and self.input_units_allow_dimensionless to dictionaries mapping input name to a boolean value. """ if isinstance(self._input_units_strict, bool): self._input_units_strict = { key: self._input_units_strict for key in self.inputs } if isinstance(self._input_units_allow_dimensionless, bool): self._input_units_allow_dimensionless = { key: self._input_units_allow_dimensionless for key in self.inputs } @property def input_units_strict(self): """ Enforce strict units on inputs to evaluate. If this is set to True, input values to evaluate will be in the exact units specified by input_units. If the input quantities are convertible to input_units, they are converted. If this is a dictionary then it should map input name to a bool to set strict input units for that parameter. """ val = self._input_units_strict if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def input_units_allow_dimensionless(self): """ Allow dimensionless input (and corresponding output). If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Only has an effect if input_units is defined. """ val = self._input_units_allow_dimensionless if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def uses_quantity(self): """ True if this model has been created with `~astropy.units.Quantity` objects or if there are no parameters. This can be used to determine if this model should be evaluated with `~astropy.units.Quantity` or regular floats. 
""" pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)] return (len(pisq) == 0) or any(pisq) def __repr__(self): return self._format_repr() def __str__(self): return self._format_str() def __len__(self): return self._n_models @staticmethod def _strip_ones(intup): return tuple(item for item in intup if item != 1) def __setattr__(self, attr, value): if isinstance(self, CompoundModel): param_names = self._param_names param_names = self.param_names if param_names is not None and attr in self.param_names: param = self.__dict__[attr] value = _tofloat(value) if param._validator is not None: param._validator(self, value) # check consistency with previous shape and size eshape = self._param_metrics[attr]["shape"] if eshape == (): eshape = (1,) vshape = np.array(value).shape if vshape == (): vshape = (1,) esize = self._param_metrics[attr]["size"] if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones( eshape ): raise InputParameterError( f"Value for parameter {attr} does not match shape or size\nexpected" f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})" ) if param.unit is None: if isinstance(value, Quantity): param._unit = value.unit param.value = value.value else: param.value = value else: if not isinstance(value, Quantity): raise UnitsError( f"The '{param.name}' parameter should be given as a" " Quantity because it was originally " "initialized as a Quantity" ) param._unit = value.unit param.value = value.value else: if attr in ["fittable", "linear"]: self.__dict__[attr] = value else: super().__setattr__(attr, value) def _pre_evaluate(self, *args, **kwargs): """ Model specific input setup that needs to occur prior to model evaluation """ # Broadcast inputs into common size inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs) # Setup actual model evaluation method parameters = self._param_sets(raw=True, units=True) def evaluate(_inputs): return self.evaluate(*chain(_inputs, parameters)) return evaluate, inputs, broadcasted_shapes, kwargs def get_bounding_box(self, with_bbox=True): """ Return the ``bounding_box`` of a model if it exists or ``None`` otherwise. Parameters ---------- with_bbox : The value of the ``with_bounding_box`` keyword argument when calling the model. Default is `True` for usage when looking up the model's ``bounding_box`` without risk of error. """ bbox = None if not isinstance(with_bbox, bool) or with_bbox: try: bbox = self.bounding_box except NotImplementedError: pass if isinstance(bbox, CompoundBoundingBox) and not isinstance( with_bbox, bool ): bbox = bbox[with_bbox] return bbox @property def _argnames(self): """The inputs used to determine input_shape for bounding_box evaluation""" return self.inputs def _validate_input_shape( self, _input, idx, argnames, model_set_axis, check_model_set_axis ): """ Perform basic validation of a single model input's shape -- it has the minimum dimensions for the given model_set_axis Returns the shape of the input if validation succeeds. """ input_shape = np.shape(_input) # Ensure that the input's model_set_axis matches the model's # n_models if input_shape and check_model_set_axis: # Note: Scalar inputs *only* get a pass on this if len(input_shape) < model_set_axis + 1: raise ValueError( f"For model_set_axis={model_set_axis}, all inputs must be at " f"least {model_set_axis + 1}-dimensional." 
) if input_shape[model_set_axis] != self._n_models: try: argname = argnames[idx] except IndexError: # the case of model.inputs = () argname = str(idx) raise ValueError( f"Input argument '{argname}' does not have the correct dimensions" f" in model_set_axis={model_set_axis} for a model set with" f" n_models={self._n_models}." ) return input_shape def _validate_input_shapes(self, inputs, argnames, model_set_axis): """ Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other. """ check_model_set_axis = self._n_models > 1 and model_set_axis is not False all_shapes = [] for idx, _input in enumerate(inputs): all_shapes.append( self._validate_input_shape( _input, idx, argnames, model_set_axis, check_model_set_axis ) ) input_shape = check_broadcast(*all_shapes) if input_shape is None: raise ValueError( "All inputs must have identical shapes or must be scalars." ) return input_shape def input_shape(self, inputs): """Get input shape for bounding_box evaluation""" return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis) def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox): """ Generic model evaluation routine Selects and evaluates model with or without bounding_box enforcement """ # Evaluate the model using the prepared evaluation method either # enforcing the bounding_box or not. bbox = self.get_bounding_box(with_bbox) if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None: outputs = bbox.evaluate(evaluate, _inputs, fill_value) else: outputs = evaluate(_inputs) return outputs def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ Model specific post evaluation processing of outputs """ if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1: outputs = (outputs,) outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs) outputs = self._process_output_units(inputs, outputs) if self.n_outputs == 1: return outputs[0] return outputs @property def bbox_with_units(self): return not isinstance(self, CompoundModel) def __call__(self, *args, **kwargs): """ Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated. """ # Turn any keyword arguments into positional arguments. args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs) # Read model evaluation related parameters with_bbox = kwargs.pop("with_bounding_box", False) fill_value = kwargs.pop("fill_value", np.nan) # prepare for model evaluation (overridden in CompoundModel) evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate( *args, **kwargs ) outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox) # post-process evaluation results (overridden in CompoundModel) return self._post_evaluate( inputs, outputs, broadcasted_shapes, with_bbox, **kwargs ) def _get_renamed_inputs_as_positional(self, *args, **kwargs): def _keyword2positional(kwargs): # Inputs were passed as keyword (not positional) arguments. # Because the signature of the ``__call__`` is defined at # the class level, the name of the inputs cannot be changed at # the instance level and the old names are always present in the # signature of the method. 
In order to use the new names of the # inputs, the old names are taken out of ``kwargs``, the input # values are sorted in the order of self.inputs and passed as # positional arguments to ``__call__``. # These are the keys that are always present as keyword arguments. keys = [ "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", ] new_inputs = {} # kwargs contain the names of the new inputs + ``keys`` allkeys = list(kwargs.keys()) # Remove the names of the new inputs from kwargs and save them # to a dict ``new_inputs``. for key in allkeys: if key not in keys: new_inputs[key] = kwargs[key] del kwargs[key] return new_inputs, kwargs n_args = len(args) new_inputs, kwargs = _keyword2positional(kwargs) n_all_args = n_args + len(new_inputs) if n_all_args < self.n_inputs: raise ValueError( f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}" ) elif n_all_args > self.n_inputs: raise ValueError( f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}" ) if n_args == 0: # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: new_args.append(new_inputs[k]) elif n_args != self.n_inputs: # Some inputs are passed as positional, others as keyword arguments. args = list(args) # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: if k in new_inputs: new_args.append(new_inputs[k]) else: new_args.append(args[0]) del args[0] else: new_args = args return new_args, kwargs # *** Properties *** @property def name(self): """User-provided name for this model instance.""" return self._name @name.setter def name(self, val): """Assign a (new) name to this model.""" self._name = val @property def model_set_axis(self): """ The index of the model set axis--that is the axis of a parameter array that pertains to which model a parameter value pertains to--as specified when the model was initialized. See the documentation on :ref:`astropy:modeling-model-sets` for more details. """ return self._model_set_axis @property def param_sets(self): """ Return parameters as a pset. This is a list with one item per parameter set, which is an array of that parameter's values across all parameter sets, with the last axis associated with the parameter set. """ return self._param_sets() @property def parameters(self): """ A flattened array of all parameter values in all parameter sets. Fittable parameters maintain this list and fitters modify it. """ # Currently the sequence of a model's parameters must be contiguous # within the _parameters array (which may be a view of a larger array, # for example when taking a sub-expression of a compound model), so # the assumption here is reliable: if not self.param_names: # Trivial, but not unheard of return self._parameters self._parameters_to_array() start = self._param_metrics[self.param_names[0]]["slice"].start stop = self._param_metrics[self.param_names[-1]]["slice"].stop return self._parameters[start:stop] @parameters.setter def parameters(self, value): """ Assigning to this attribute updates the parameters array rather than replacing it. 
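
        The assigned value is flattened and must be broadcastable onto the
        existing parameters array (typically by supplying the same total
        number of elements); otherwise an `InputParameterError` is raised.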
""" if not self.param_names: return start = self._param_metrics[self.param_names[0]]["slice"].start stop = self._param_metrics[self.param_names[-1]]["slice"].stop try: value = np.array(value).flatten() self._parameters[start:stop] = value except ValueError as e: raise InputParameterError( "Input parameter values not compatible with the model " f"parameters array: {e!r}" ) self._array_to_parameters() @property def sync_constraints(self): """ This is a boolean property that indicates whether or not accessing constraints automatically check the constituent models current values. It defaults to True on creation of a model, but for fitting purposes it should be set to False for performance reasons. """ if not hasattr(self, "_sync_constraints"): self._sync_constraints = True return self._sync_constraints @sync_constraints.setter def sync_constraints(self, value): if not isinstance(value, bool): raise ValueError("sync_constraints only accepts True or False as values") self._sync_constraints = value @property def fixed(self): """ A ``dict`` mapping parameter names to their fixed constraint. """ if not hasattr(self, "_fixed") or self.sync_constraints: self._fixed = _ConstraintsDict(self, "fixed") return self._fixed @property def bounds(self): """ A ``dict`` mapping parameter names to their upper and lower bounds as ``(min, max)`` tuples or ``[min, max]`` lists. """ if not hasattr(self, "_bounds") or self.sync_constraints: self._bounds = _ConstraintsDict(self, "bounds") return self._bounds @property def tied(self): """ A ``dict`` mapping parameter names to their tied constraint. """ if not hasattr(self, "_tied") or self.sync_constraints: self._tied = _ConstraintsDict(self, "tied") return self._tied @property def eqcons(self): """List of parameter equality constraints.""" return self._mconstraints["eqcons"] @property def ineqcons(self): """List of parameter inequality constraints.""" return self._mconstraints["ineqcons"] def has_inverse(self): """ Returns True if the model has an analytic or user inverse defined. """ try: self.inverse except NotImplementedError: return False return True @property def inverse(self): """ Returns a new `~astropy.modeling.Model` instance which performs the inverse transform, if an analytic inverse is defined for this model. Even on models that don't have an inverse defined, this property can be set with a manually-defined inverse, such a pre-computed or experimentally determined inverse (often given as a `~astropy.modeling.polynomial.PolynomialModel`, but not by requirement). A custom inverse can be deleted with ``del model.inverse``. In this case the model's inverse is reset to its default, if a default exists (otherwise the default is to raise `NotImplementedError`). Note to authors of `~astropy.modeling.Model` subclasses: To define an inverse for a model simply override this property to return the appropriate model representing the inverse. The machinery that will make the inverse manually-overridable is added automatically by the base class. """ if self._user_inverse is not None: return self._user_inverse elif self._inverse is not None: result = self._inverse() if result is not NotImplemented: if not self._has_inverse_bounding_box: result.bounding_box = None return result raise NotImplementedError( "No analytical or user-supplied inverse transform " "has been implemented for this model." 
) @inverse.setter def inverse(self, value): if not isinstance(value, (Model, type(None))): raise ValueError( "The ``inverse`` attribute may be assigned a `Model` " "instance or `None` (where `None` explicitly forces the " "model to have no inverse." ) self._user_inverse = value @inverse.deleter def inverse(self): """ Resets the model's inverse to its default (if one exists, otherwise the model will have no inverse). """ try: del self._user_inverse except AttributeError: pass @property def has_user_inverse(self): """ A flag indicating whether or not a custom inverse model has been assigned to this model by a user, via assignment to ``model.inverse``. """ return self._user_inverse is not None @property def bounding_box(self): r""" A `tuple` of length `n_inputs` defining the bounding box limits, or raise `NotImplementedError` for no bounding_box. The default limits are given by a ``bounding_box`` property or method defined in the class body of a specific model. If not defined then this property just raises `NotImplementedError` by default (but may be assigned a custom value by a user). ``bounding_box`` can be set manually to an array-like object of shape ``(model.n_inputs, 2)``. For further usage, see :ref:`astropy:bounding-boxes` The limits are ordered according to the `numpy` ``'C'`` indexing convention, and are the reverse of the model input order, e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined: * for 1D: ``(x_low, x_high)`` * for 2D: ``((y_low, y_high), (x_low, x_high))`` * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))`` Examples -------- Setting the ``bounding_box`` limits for a 1D and 2D model: >>> from astropy.modeling.models import Gaussian1D, Gaussian2D >>> model_1d = Gaussian1D() >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1) >>> model_1d.bounding_box = (-5, 5) >>> model_2d.bounding_box = ((-6, 6), (-5, 5)) Setting the bounding_box limits for a user-defined 3D `custom_model`: >>> from astropy.modeling.models import custom_model >>> def const3d(x, y, z, amp=1): ... return amp ... >>> Const3D = custom_model(const3d) >>> model_3d = Const3D() >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4)) To reset ``bounding_box`` to its default limits just delete the user-defined value--this will reset it back to the default defined on the class: >>> del model_1d.bounding_box To disable the bounding box entirely (including the default), set ``bounding_box`` to `None`: >>> model_1d.bounding_box = None >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError: No bounding box is defined for this model (note: the bounding box was explicitly disabled for this model; use `del model.bounding_box` to restore the default bounding box, if one is defined for this model). """ if self._user_bounding_box is not None: if self._user_bounding_box is NotImplemented: raise NotImplementedError( "No bounding box is defined for this model (note: the " "bounding box was explicitly disabled for this model; " "use `del model.bounding_box` to restore the default " "bounding box, if one is defined for this model)." ) return self._user_bounding_box elif self._bounding_box is None: raise NotImplementedError("No bounding box is defined for this model.") elif isinstance(self._bounding_box, ModelBoundingBox): # This typically implies a hard-coded bounding box. 
This will # probably be rare, but it is an option return self._bounding_box elif isinstance(self._bounding_box, types.MethodType): return ModelBoundingBox.validate(self, self._bounding_box()) else: # The only other allowed possibility is that it's a ModelBoundingBox # subclass, so we call it with its default arguments and return an # instance of it (that can be called to recompute the bounding box # with any optional parameters) # (In other words, in this case self._bounding_box is a *class*) bounding_box = self._bounding_box((), model=self)() return self._bounding_box(bounding_box, model=self) @bounding_box.setter def bounding_box(self, bounding_box): """ Assigns the bounding box limits. """ if bounding_box is None: cls = None # We use this to explicitly set an unimplemented bounding box (as # opposed to no user bounding box defined) bounding_box = NotImplemented elif isinstance(bounding_box, CompoundBoundingBox) or isinstance( bounding_box, dict ): cls = CompoundBoundingBox elif isinstance(self._bounding_box, type) and issubclass( self._bounding_box, ModelBoundingBox ): cls = self._bounding_box else: cls = ModelBoundingBox if cls is not None: try: bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True) except ValueError as exc: raise ValueError(exc.args[0]) self._user_bounding_box = bounding_box def set_slice_args(self, *args): if isinstance(self._user_bounding_box, CompoundBoundingBox): self._user_bounding_box.slice_args = args else: raise RuntimeError("The bounding_box for this model is not compound") @bounding_box.deleter def bounding_box(self): self._user_bounding_box = None @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None @property def cov_matrix(self): """ Fitter should set covariance matrix, if available. """ return self._cov_matrix @cov_matrix.setter def cov_matrix(self, cov): self._cov_matrix = cov unfix_untied_params = [ p for p in self.param_names if (self.fixed[p] is False) and (self.tied[p] is False) ] if type(cov) == list: # model set param_stds = [] for c in cov: param_stds.append( [np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)] ) for p, param_name in enumerate(unfix_untied_params): par = getattr(self, param_name) par.std = [item[p] for item in param_stds] setattr(self, param_name, par) else: param_stds = [ np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix) ] for param_name in unfix_untied_params: par = getattr(self, param_name) par.std = param_stds.pop(0) setattr(self, param_name, par) @property def stds(self): """ Standard deviation of parameters, if covariance matrix is available. """ return self._stds @stds.setter def stds(self, stds): self._stds = stds @property def separable(self): """A flag indicating whether a model is separable.""" if self._separable is not None: return self._separable raise NotImplementedError( 'The "separable" property is not defined for ' f"model {self.__class__.__name__}" ) # *** Public methods *** def without_units_for_data(self, **kwargs): """ Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. 
Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) for name, unit in parameter_units.items(): parameter = getattr(model, name) if parameter.unit is not None: parameter.value = parameter.quantity.to(unit).value parameter._set_unit(None, force=True) if isinstance(model, CompoundModel): model.strip_units_from_tree() return model def output_units(self, **kwargs): """ Return a dictionary of output units for this model given a dictionary of fitting inputs and outputs The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). This method will force extra model evaluations, which maybe computationally expensive. To avoid this, one can add a return_units property to the model, see :ref:`astropy:models_return_units`. """ units = self.return_units if units is None or units == {}: inputs = {inp: kwargs[inp] for inp in self.inputs} values = self(**inputs) if self.n_outputs == 1: values = (values,) units = { out: getattr(values[index], "unit", dimensionless_unscaled) for index, out in enumerate(self.outputs) } return units def strip_units_from_tree(self): for item in self._leaflist: for parname in item.param_names: par = getattr(item, parname) par._set_unit(None, force=True) def with_units_from_data(self, **kwargs): """ Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. 
""" model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) # We are adding units to parameters that already have a value, but we # don't want to convert the parameter, just add the unit directly, # hence the call to ``_set_unit``. for name, unit in parameter_units.items(): parameter = getattr(model, name) parameter._set_unit(unit, force=True) return model @property def _has_units(self): # Returns True if any of the parameters have units for param in self.param_names: if getattr(self, param).unit is not None: return True else: return False @property def _supports_unit_fitting(self): # If the model has a ``_parameter_units_for_data_units`` method, this # indicates that we have enough information to strip the units away # and add them back after fitting, when fitting quantities return hasattr(self, "_parameter_units_for_data_units") @abc.abstractmethod def evaluate(self, *args, **kwargs): """Evaluate the model on some input variables.""" def sum_of_implicit_terms(self, *args, **kwargs): """ Evaluate the sum of any implicit model terms on some input variables. This includes any fixed terms used in evaluating a linear model that do not have corresponding parameters exposed to the user. The prototypical case is `astropy.modeling.functional_models.Shift`, which corresponds to a function y = a + bx, where b=1 is intrinsically fixed by the type of model, such that sum_of_implicit_terms(x) == x. This method is needed by linear fitters to correct the dependent variable for the implicit term(s) when solving for the remaining terms (ie. a = y - bx). """ def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ try: bbox = self.bounding_box except NotImplementedError: bbox = None if isinstance(bbox, ModelBoundingBox): bbox = bbox.bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, # important when using add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out @property def input_units(self): """ This property is used to indicate what units or sets of units the evaluate method expects, and returns a dictionary mapping inputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid input units, in which case this property should not be overridden since it will return the input units based on the annotations. """ if hasattr(self, "_input_units"): return self._input_units elif hasattr(self.evaluate, "__annotations__"): annotations = self.evaluate.__annotations__.copy() annotations.pop("return", None) if annotations: # If there are not annotations for all inputs this will error. return {name: annotations[name] for name in self.inputs} else: # None means any unit is accepted return None @property def return_units(self): """ This property is used to indicate what units or sets of units the output of evaluate should be in, and returns a dictionary mapping outputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid output units, in which case this property should not be overridden since it will return the return units based on the annotations. """ if hasattr(self, "_return_units"): return self._return_units elif hasattr(self.evaluate, "__annotations__"): return self.evaluate.__annotations__.get("return", None) else: # None means any unit is accepted return None def _prepare_inputs_single_model(self, params, inputs, **kwargs): broadcasts = [] for idx, _input in enumerate(inputs): input_shape = _input.shape # Ensure that array scalars are always upgrade to 1-D arrays for the # sake of consistency with how parameters work. 
They will be cast back # to scalars at the end if not input_shape: inputs[idx] = _input.reshape((1,)) if not params: max_broadcast = input_shape else: max_broadcast = () for param in params: try: if self.standard_broadcasting: broadcast = check_broadcast(input_shape, param.shape) else: broadcast = input_shape except IncompatibleShapeError: raise ValueError( f"self input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} cannot be broadcast with parameter" f" {param.name!r} of shape {param.shape!r}." ) if len(broadcast) > len(max_broadcast): max_broadcast = broadcast elif len(broadcast) == len(max_broadcast): max_broadcast = max(max_broadcast, broadcast) broadcasts.append(max_broadcast) if self.n_outputs > self.n_inputs: extra_outputs = self.n_outputs - self.n_inputs if not broadcasts: # If there were no inputs then the broadcasts list is empty # just add a None since there is no broadcasting of outputs and # inputs necessary (see _prepare_outputs_single_self) broadcasts.append(None) broadcasts.extend([broadcasts[0]] * extra_outputs) return inputs, (broadcasts,) @staticmethod def _remove_axes_from_shape(shape, axis): """ Given a shape tuple as the first input, construct a new one by removing that particular axis from the shape and all preceeding axes. Negative axis numbers are permittted, where the axis is relative to the last axis. """ if len(shape) == 0: return shape if axis < 0: axis = len(shape) + axis return shape[:axis] + shape[axis + 1 :] if axis >= len(shape): axis = len(shape) - 1 shape = shape[axis + 1 :] return shape def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs): reshaped = [] pivots = [] model_set_axis_param = self.model_set_axis # needed to reshape param for idx, _input in enumerate(inputs): max_param_shape = () if self._n_models > 1 and model_set_axis_input is not False: # Use the shape of the input *excluding* the model axis input_shape = ( _input.shape[:model_set_axis_input] + _input.shape[model_set_axis_input + 1 :] ) else: input_shape = _input.shape for param in params: try: check_broadcast( input_shape, self._remove_axes_from_shape(param.shape, model_set_axis_param), ) except IncompatibleShapeError: raise ValueError( f"Model input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} " f"cannot be broadcast with parameter {param.name!r} of shape " f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}." 
) if len(param.shape) - 1 > len(max_param_shape): max_param_shape = self._remove_axes_from_shape( param.shape, model_set_axis_param ) # We've now determined that, excluding the model_set_axis, the # input can broadcast with all the parameters input_ndim = len(input_shape) if model_set_axis_input is False: if len(max_param_shape) > input_ndim: # Just needs to prepend new axes to the input n_new_axes = 1 + len(max_param_shape) - input_ndim new_axes = (1,) * n_new_axes new_shape = new_axes + _input.shape pivot = model_set_axis_param else: pivot = input_ndim - len(max_param_shape) new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:] new_input = _input.reshape(new_shape) else: if len(max_param_shape) >= input_ndim: n_new_axes = len(max_param_shape) - input_ndim pivot = self.model_set_axis new_axes = (1,) * n_new_axes new_shape = ( _input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :] ) new_input = _input.reshape(new_shape) else: pivot = _input.ndim - len(max_param_shape) - 1 new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1) pivots.append(pivot) reshaped.append(new_input) if self.n_inputs < self.n_outputs: pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs)) return reshaped, (pivots,) def prepare_inputs( self, *inputs, model_set_axis=None, equivalencies=None, **kwargs ): """ This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there are more than one parameter sets. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method. """ # When we instantiate the model class, we make sure that __call__ can # take the following two keyword arguments: model_set_axis and # equivalencies. if model_set_axis is None: # By default the model_set_axis for the input is assumed to be the # same as that for the parameters the model was defined with # TODO: Ensure that negative model_set_axis arguments are respected model_set_axis = self.model_set_axis params = [getattr(self, name) for name in self.param_names] inputs = [np.asanyarray(_input, dtype=float) for _input in inputs] self._validate_input_shapes(inputs, self.inputs, model_set_axis) inputs_map = kwargs.get("inputs_map", None) inputs = self._validate_input_units(inputs, equivalencies, inputs_map) # The input formatting required for single models versus a multiple # model set are different enough that they've been split into separate # subroutines if self._n_models == 1: return self._prepare_inputs_single_model(params, inputs, **kwargs) else: return self._prepare_inputs_model_set( params, inputs, model_set_axis, **kwargs ) def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None): inputs = list(inputs) name = self.name or self.__class__.__name__ # Check that the units are correct, if applicable if self.input_units is not None: # If a leaflist is provided that means this is in the context of # a compound model and it is necessary to create the appropriate # alias for the input coordinate name for the equivalencies dict if inputs_map: edict = {} for mod, mapping in inputs_map: if self is mod: edict[mapping[0]] = equivalencies[mapping[1]] else: edict = equivalencies # We combine any instance-level input equivalencies with user # specified ones at call-time. 
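            # The combined dictionary maps each input name to the list of
            # equivalencies that is used below when checking (and, where
            # needed, converting) that input's units.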
input_units_equivalencies = _combine_equivalency_dict( self.inputs, edict, self.input_units_equivalencies ) # We now iterate over the different inputs and make sure that their # units are consistent with those specified in input_units. for i in range(len(inputs)): input_name = self.inputs[i] input_unit = self.input_units.get(input_name, None) if input_unit is None: continue if isinstance(inputs[i], Quantity): # We check for consistency of the units with input_units, # taking into account any equivalencies if inputs[i].unit.is_equivalent( input_unit, equivalencies=input_units_equivalencies[input_name] ): # If equivalencies have been specified, we need to # convert the input to the input units - this is # because some equivalencies are non-linear, and # we need to be sure that we evaluate the model in # its own frame of reference. If input_units_strict # is set, we also need to convert to the input units. if ( len(input_units_equivalencies) > 0 or self.input_units_strict[input_name] ): inputs[i] = inputs[i].to( input_unit, equivalencies=input_units_equivalencies[input_name], ) else: # We consider the following two cases separately so as # to be able to raise more appropriate/nicer exceptions if input_unit is dimensionless_unscaled: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," "could not be converted to " "required dimensionless " "input" ) else: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," " could not be " "converted to required input" f" units of {input_unit} ({input_unit.physical_type})" ) else: # If we allow dimensionless input, we add the units to the # input values without conversion, otherwise we raise an # exception. if ( not self.input_units_allow_dimensionless[input_name] and input_unit is not dimensionless_unscaled and input_unit is not None ): if np.any(inputs[i] != 0): raise UnitsError( f"{name}: Units of input '{self.inputs[i]}'," " (dimensionless), could not be converted to required " f"input units of {input_unit} " f"({input_unit.physical_type})" ) return inputs def _process_output_units(self, inputs, outputs): inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs]) if self.return_units and inputs_are_quantity: # We allow a non-iterable unit only if there is one output if self.n_outputs == 1 and not isiterable(self.return_units): return_units = {self.outputs[0]: self.return_units} else: return_units = self.return_units outputs = tuple( Quantity(out, return_units.get(out_name, None), subok=True) for out, out_name in zip(outputs, self.outputs) ) return outputs @staticmethod def _prepare_output_single_model(output, broadcast_shape): if broadcast_shape is not None: if not broadcast_shape: return output.item() else: try: return output.reshape(broadcast_shape) except ValueError: try: return output.item() except ValueError: return output return output def _prepare_outputs_single_model(self, outputs, broadcasted_shapes): outputs = list(outputs) for idx, output in enumerate(outputs): try: broadcast_shape = check_broadcast(*broadcasted_shapes[0]) except (IndexError, TypeError): broadcast_shape = broadcasted_shapes[0][idx] outputs[idx] = self._prepare_output_single_model(output, broadcast_shape) return tuple(outputs) def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis): pivots = broadcasted_shapes[0] # If model_set_axis = False was passed then use # self._model_set_axis to format the output. 
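        # (i.e. fall back to the axis along which the model set was
        # originally defined)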
if model_set_axis is None or model_set_axis is False: model_set_axis = self.model_set_axis outputs = list(outputs) for idx, output in enumerate(outputs): pivot = pivots[idx] if pivot < output.ndim and pivot != model_set_axis: outputs[idx] = np.rollaxis(output, pivot, model_set_axis) return tuple(outputs) def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs): model_set_axis = kwargs.get("model_set_axis", None) if len(self) == 1: return self._prepare_outputs_single_model(outputs, broadcasted_shapes) else: return self._prepare_outputs_model_set( outputs, broadcasted_shapes, model_set_axis ) def copy(self): """ Return a copy of this model. Uses a deep copy so that all model attributes, including parameter values, are copied as well. """ return copy.deepcopy(self) def deepcopy(self): """ Return a deep copy of this model. """ return self.copy() @sharedmethod def rename(self, name): """ Return a copy of this model with a new name. """ new_model = self.copy() new_model._name = name return new_model def coerce_units( self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False, ): """ Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ from .mappings import UnitsMapping result = self if input_units is not None: if self.input_units is not None: model_units = self.input_units else: model_units = {} for unit in [model_units.get(i) for i in self.inputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify input_units for model with existing input units" ) if isinstance(input_units, dict): if input_units.keys() != set(self.inputs): message = ( f"""input_units keys ({", ".join(input_units.keys())}) """ f"""do not match model inputs ({", ".join(self.inputs)})""" ) raise ValueError(message) input_units = [input_units[i] for i in self.inputs] if len(input_units) != self.n_inputs: message = ( "input_units length does not match n_inputs: " f"expected {self.n_inputs}, received {len(input_units)}" ) raise ValueError(message) mapping = tuple( (unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units) ) input_mapping = UnitsMapping( mapping, input_units_equivalencies=input_units_equivalencies, input_units_allow_dimensionless=input_units_allow_dimensionless, ) input_mapping.inputs = self.inputs input_mapping.outputs = self.inputs result = input_mapping | result if return_units is not None: if self.return_units is not None: model_units = self.return_units else: model_units = {} for unit in [model_units.get(i) for i in self.outputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify return_units for model " "with existing output units" ) if isinstance(return_units, dict): if return_units.keys() != set(self.outputs): message = ( f"""return_units keys ({", ".join(return_units.keys())}) """ f"""do not match model outputs ({", ".join(self.outputs)})""" ) raise ValueError(message) return_units = [return_units[i] for i in self.outputs] if len(return_units) != self.n_outputs: message = ( "return_units length does not match n_outputs: " f"expected {self.n_outputs}, received {len(return_units)}" ) raise ValueError(message) mapping = tuple( (model_units.get(i), unit) for i, unit in zip(self.outputs, return_units) ) return_mapping = UnitsMapping(mapping) return_mapping.inputs = self.outputs return_mapping.outputs = self.outputs result = result | return_mapping return result @property def n_submodels(self): """ Return the number of components in a single model, which is obviously 1. """ return 1 def _initialize_constraints(self, kwargs): """ Pop parameter constraint values off the keyword arguments passed to `Model.__init__` and store them in private instance attributes. """ # Pop any constraints off the keyword arguments for constraint in self.parameter_constraints: values = kwargs.pop(constraint, {}) for ckey, cvalue in values.items(): param = getattr(self, ckey) setattr(param, constraint, cvalue) self._mconstraints = {} for constraint in self.model_constraints: values = kwargs.pop(constraint, []) self._mconstraints[constraint] = values def _initialize_parameters(self, args, kwargs): """ Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array. 
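
        This also resolves the ``n_models`` and ``model_set_axis`` keyword
        arguments and checks that the supplied parameter values are mutually
        broadcastable.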
""" n_models = kwargs.pop("n_models", None) if not ( n_models is None or (isinstance(n_models, (int, np.integer)) and n_models >= 1) ): raise ValueError( "n_models must be either None (in which case it is " "determined from the model_set_axis of the parameter initial " "values) or it must be a positive integer " f"(got {n_models!r})" ) model_set_axis = kwargs.pop("model_set_axis", None) if model_set_axis is None: if n_models is not None and n_models > 1: # Default to zero model_set_axis = 0 else: # Otherwise disable model_set_axis = False else: if not ( model_set_axis is False or np.issubdtype(type(model_set_axis), np.integer) ): raise ValueError( "model_set_axis must be either False or an integer " "specifying the parameter array axis to map to each " f"model in a set of models (got {model_set_axis!r})." ) # Process positional arguments by matching them up with the # corresponding parameters in self.param_names--if any also appear as # keyword arguments this presents a conflict params = set() if len(args) > len(self.param_names): raise TypeError( f"{self.__class__.__name__}.__init__() takes at most " f"{len(self.param_names)} positional arguments ({len(args)} given)" ) self._model_set_axis = model_set_axis self._param_metrics = defaultdict(dict) for idx, arg in enumerate(args): if arg is None: # A value of None implies using the default value, if exists continue # We use quantity_asanyarray here instead of np.asanyarray because # if any of the arguments are quantities, we need to return a # Quantity object not a plain Numpy array. param_name = self.param_names[idx] params.add(param_name) if not isinstance(arg, Parameter): value = quantity_asanyarray(arg, dtype=float) else: value = arg self._initialize_parameter_value(param_name, value) # At this point the only remaining keyword arguments should be # parameter names; any others are in error. for param_name in self.param_names: if param_name in kwargs: if param_name in params: raise TypeError( f"{self.__class__.__name__}.__init__() got multiple values for" f" parameter {param_name!r}" ) value = kwargs.pop(param_name) if value is None: continue # We use quantity_asanyarray here instead of np.asanyarray # because if any of the arguments are quantities, we need # to return a Quantity object not a plain Numpy array. 
value = quantity_asanyarray(value, dtype=float) params.add(param_name) self._initialize_parameter_value(param_name, value) # Now deal with case where param_name is not supplied by args or kwargs for param_name in self.param_names: if param_name not in params: self._initialize_parameter_value(param_name, None) if kwargs: # If any keyword arguments were left over at this point they are # invalid--the base class should only be passed the parameter # values, constraints, and param_dim for kwarg in kwargs: # Just raise an error on the first unrecognized argument raise TypeError( f"{self.__class__.__name__}.__init__() got an unrecognized" f" parameter {kwarg!r}" ) # Determine the number of model sets: If the model_set_axis is # None then there is just one parameter set; otherwise it is determined # by the size of that axis on the first parameter--if the other # parameters don't have the right number of axes or the sizes of their # model_set_axis don't match an error is raised if model_set_axis is not False and n_models != 1 and params: max_ndim = 0 if model_set_axis < 0: min_ndim = abs(model_set_axis) else: min_ndim = model_set_axis + 1 for name in self.param_names: value = getattr(self, name) param_ndim = np.ndim(value) if param_ndim < min_ndim: raise InputParameterError( "All parameter values must be arrays of dimension at least" f" {min_ndim} for model_set_axis={model_set_axis} (the value" f" given for {name!r} is only {param_ndim}-dimensional)" ) max_ndim = max(max_ndim, param_ndim) if n_models is None: # Use the dimensions of the first parameter to determine # the number of model sets n_models = value.shape[model_set_axis] elif value.shape[model_set_axis] != n_models: raise InputParameterError( f"Inconsistent dimensions for parameter {name!r} for" f" {n_models} model sets. 
The length of axis" f" {model_set_axis} must be the same for all input parameter" " values" ) self._check_param_broadcast(max_ndim) else: if n_models is None: n_models = 1 self._check_param_broadcast(None) self._n_models = n_models # now validate parameters for name in params: param = getattr(self, name) if param._validator is not None: param._validator(self, param.value) def _initialize_parameter_value(self, param_name, value): """Mostly deals with consistency checks and determining unit issues.""" if isinstance(value, Parameter): self.__dict__[param_name] = value return param = getattr(self, param_name) # Use default if value is not provided if value is None: default = param.default if default is None: # No value was supplied for the parameter and the # parameter does not have a default, therefore the model # is underspecified raise TypeError( f"{self.__class__.__name__}.__init__() requires a value for " f"parameter {param_name!r}" ) value = default unit = param.unit else: if isinstance(value, Quantity): unit = value.unit value = value.value else: unit = None if unit is None and param.unit is not None: raise InputParameterError( f"{self.__class__.__name__}.__init__() requires a Quantity for" f" parameter {param_name!r}" ) param._unit = unit param._set_unit(unit, force=True) param.internal_unit = None if param._setter is not None: if unit is not None: _val = param._setter(value * unit) else: _val = param._setter(value) if isinstance(_val, Quantity): param.internal_unit = _val.unit param._internal_value = np.array(_val.value) else: param.internal_unit = None param._internal_value = np.array(_val) else: param._value = np.array(value) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name]["slice"] = param_slice param_metrics[name]["shape"] = param_shape param_metrics[name]["size"] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) def _parameters_to_array(self): # Now set the parameter values (this will also fill # self._parameters) param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = param.value if not isinstance(value, np.ndarray): value = np.array([value]) self._parameters[param_metrics[name]["slice"]] = value.ravel() # Finally validate all the parameters; we do this last so that # validators that depend on one of the other parameters' values will # work def _array_to_parameters(self): param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = self._parameters[param_metrics[name]["slice"]] value.shape = param_metrics[name]["shape"] param.value = value def _check_param_broadcast(self, max_ndim): """ This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets. 
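
        Any adjusted shapes are stored in
        ``self._param_metrics[name]['broadcast_shape']`` for later use when
        assembling parameter sets.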
""" all_shapes = [] model_set_axis = self._model_set_axis for name in self.param_names: param = getattr(self, name) value = param.value param_shape = np.shape(value) param_ndim = len(param_shape) if max_ndim is not None and param_ndim < max_ndim: # All arrays have the same number of dimensions up to the # model_set_axis dimension, but after that they may have a # different number of trailing axes. The number of trailing # axes must be extended for mutual compatibility. For example # if max_ndim = 3 and model_set_axis = 0, an array with the # shape (2, 2) must be extended to (2, 1, 2). However, an # array with shape (2,) is extended to (2, 1). new_axes = (1,) * (max_ndim - param_ndim) if model_set_axis < 0: # Just need to prepend axes to make up the difference broadcast_shape = new_axes + param_shape else: broadcast_shape = ( param_shape[: model_set_axis + 1] + new_axes + param_shape[model_set_axis + 1 :] ) self._param_metrics[name]["broadcast_shape"] = broadcast_shape all_shapes.append(broadcast_shape) else: all_shapes.append(param_shape) # Now check mutual broadcastability of all shapes try: check_broadcast(*all_shapes) except IncompatibleShapeError as exc: shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args param_a = self.param_names[shape_a_idx] param_b = self.param_names[shape_b_idx] raise InputParameterError( f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with " f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays " "must have shapes that are mutually compatible according " "to the broadcasting rules." ) def _param_sets(self, raw=False, units=False): """ Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ._parameters array, as opposed to the values displayed to users. In most cases these are one in the same but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future. """ values = [] shapes = [] for name in self.param_names: param = getattr(self, name) if raw and param._setter: value = param._internal_value else: value = param.value broadcast_shape = self._param_metrics[name].get("broadcast_shape") if broadcast_shape is not None: value = value.reshape(broadcast_shape) shapes.append(np.shape(value)) if len(self) == 1: # Add a single param set axis to the parameter's value (thus # converting scalars to shape (1,) array values) for # consistency value = np.array([value]) if units: if raw and param.internal_unit is not None: unit = param.internal_unit else: unit = param.unit if unit is not None: value = Quantity(value, unit, subok=True) values.append(value) if len(set(shapes)) != 1 or units: # If the parameters are not all the same shape, converting to an # array is going to produce an object array # However the way Numpy creates object arrays is tricky in that it # will recurse into array objects in the list and break them up # into separate objects. Doing things this way ensures a 1-D # object array the elements of which are the individual parameter # arrays. There's not much reason to do this over returning a list # except for consistency psets = np.empty(len(values), dtype=object) psets[:] = values return psets return np.array(values) def _format_repr(self, args=[], kwargs={}, defaults={}): """ Internal implementation of ``__repr__``. 
This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ parts = [repr(a) for a in args] parts.extend( f"{name}={param_repr_oneline(getattr(self, name))}" for name in self.param_names ) if self.name is not None: parts.append(f"name={self.name!r}") for kwarg, value in kwargs.items(): if kwarg in defaults and defaults[kwarg] == value: continue parts.append(f"{kwarg}={value!r}") if len(self) > 1: parts.append(f"n_models={len(self)}") return f"<{self.__class__.__name__}({', '.join(parts)})>" def _format_str(self, keywords=[], defaults={}): """ Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting. """ default_keywords = [ ("Model", self.__class__.__name__), ("Name", self.name), ("Inputs", self.inputs), ("Outputs", self.outputs), ("Model set size", len(self)), ] parts = [ f"{keyword}: {value}" for keyword, value in default_keywords if value is not None ] for keyword, value in keywords: if keyword.lower() in defaults and defaults[keyword.lower()] == value: continue parts.append(f"{keyword}: {value}") parts.append("Parameters:") if len(self) == 1: columns = [[getattr(self, name).value] for name in self.param_names] else: columns = [getattr(self, name).value for name in self.param_names] if columns: param_table = Table(columns, names=self.param_names) # Set units on the columns for name in self.param_names: param_table[name].unit = getattr(self, name).unit parts.append(indent(str(param_table), width=4)) return "\n".join(parts) class FittableModel(Model): """ Base class for models that can be fitted using the built-in fitting algorithms. """ linear = False # derivative with respect to parameters fit_deriv = None """ Function (similar to the model's `~Model.evaluate`) to compute the derivatives of the model with respect to its parameters, for use by fitting algorithms. In other words, this computes the Jacobian matrix with respect to the model's parameters. """ # Flag that indicates if the model derivatives with respect to parameters # are given in columns or rows col_fit_deriv = True fittable = True class Fittable1DModel(FittableModel): """ Base class for one-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. """ n_inputs = 1 n_outputs = 1 _separable = True class Fittable2DModel(FittableModel): """ Base class for two-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. 
""" n_inputs = 2 n_outputs = 1 def _make_arithmetic_operator(oper): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g def op(f, g): return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2]) return op def _composition_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2]) def _join_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return ( lambda inputs, params: ( f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params) ), f[1] + g[1], f[2] + g[2], ) BINARY_OPERATORS = { "+": _make_arithmetic_operator(operator.add), "-": _make_arithmetic_operator(operator.sub), "*": _make_arithmetic_operator(operator.mul), "/": _make_arithmetic_operator(operator.truediv), "**": _make_arithmetic_operator(operator.pow), "|": _composition_operator, "&": _join_operator, } SPECIAL_OPERATORS = _SpecialOperatorsDict() def _add_special_operator(sop_name, sop): return SPECIAL_OPERATORS.add(sop_name, sop) class CompoundModel(Model): """ Base class for compound models. While it can be used directly, the recommended way to combine models is through the model operators. """ def __init__(self, op, left, right, name=None): self.__dict__["_param_names"] = None self._n_submodels = None self.op = op self.left = left self.right = right self._bounding_box = None self._user_bounding_box = None self._leaflist = None self._tdict = None self._parameters = None self._parameters_ = None self._param_metrics = None if op != "fix_inputs" and len(left) != len(right): raise ValueError("Both operands must have equal values for n_models") self._n_models = len(left) if op != "fix_inputs" and ( (left.model_set_axis != right.model_set_axis) or left.model_set_axis ): # not False and not 0 raise ValueError( "model_set_axis must be False or 0 and consistent for operands" ) self._model_set_axis = left.model_set_axis if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS: if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs: raise ModelDefinitionError( "Both operands must match numbers of inputs and outputs" ) self.n_inputs = left.n_inputs self.n_outputs = left.n_outputs self.inputs = left.inputs self.outputs = left.outputs elif op == "&": self.n_inputs = left.n_inputs + right.n_inputs self.n_outputs = left.n_outputs + right.n_outputs self.inputs = combine_labels(left.inputs, right.inputs) self.outputs = combine_labels(left.outputs, right.outputs) elif op == "|": if left.n_outputs != right.n_inputs: raise ModelDefinitionError( "Unsupported operands for |:" f" {left.name} (n_inputs={left.n_inputs}," f" n_outputs={left.n_outputs}) and" f" {right.name} (n_inputs={right.n_inputs}," f" n_outputs={right.n_outputs}); n_outputs for the left-hand model" " must match n_inputs for the right-hand model." ) self.n_inputs = left.n_inputs self.n_outputs = right.n_outputs self.inputs = left.inputs self.outputs = right.outputs elif op == "fix_inputs": if not isinstance(left, Model): raise ValueError( 'First argument to "fix_inputs" must be an instance of ' "an astropy Model." 
) if not isinstance(right, dict): raise ValueError( 'Expected a dictionary for second argument of "fix_inputs".' ) # Dict keys must match either possible indices # for model on left side, or names for inputs. self.n_inputs = left.n_inputs - len(right) # Assign directly to the private attribute (instead of using the setter) # to avoid asserting the new number of outputs matches the old one. self._outputs = left.outputs self.n_outputs = left.n_outputs newinputs = list(left.inputs) keys = right.keys() input_ind = [] for key in keys: if np.issubdtype(type(key), np.integer): if key >= left.n_inputs or key < 0: raise ValueError( "Substitution key integer value " "not among possible input choices." ) if key in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(key) elif isinstance(key, str): if key not in left.inputs: raise ValueError( "Substitution key string not among possible input choices." ) # Check to see it doesn't match positional # specification. ind = left.inputs.index(key) if ind in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(ind) # Remove substituted inputs input_ind.sort() input_ind.reverse() for ind in input_ind: del newinputs[ind] self.inputs = tuple(newinputs) # Now check to see if the input model has bounding_box defined. # If so, remove the appropriate dimensions and set it for this # instance. try: self.bounding_box = self.left.bounding_box.fix_inputs(self, right) except NotImplementedError: pass else: raise ModelDefinitionError("Illegal operator: ", self.op) self.name = name self._fittable = None self.fit_deriv = None self.col_fit_deriv = None if op in ("|", "+", "-"): self.linear = left.linear and right.linear else: self.linear = False self.eqcons = [] self.ineqcons = [] self.n_left_params = len(self.left.parameters) self._map_parameters() def _get_left_inputs_from_args(self, args): return args[: self.left.n_inputs] def _get_right_inputs_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs] elif op == "|" or op == "fix_inputs": return None else: return args[: self.left.n_inputs] def _get_left_params_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) n_inputs = self.left.n_inputs + self.right.n_inputs return args[n_inputs : n_inputs + self.n_left_params] else: return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params] def _get_right_params_from_args(self, args): op = self.op if op == "fix_inputs": return None if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :] else: return args[self.left.n_inputs + self.n_left_params :] def _get_kwarg_model_parameters_as_positional(self, args, kwargs): # could do it with inserts but rebuilding seems like simpilist way # TODO: Check if any param names are in kwargs maybe as an intersection of sets? 
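        # Illustrative note (added; hypothetical example, not from the upstream source):
        # for a compound model such as ``Shift(1) & Scale(2)`` the positional layout
        # expected by ``evaluate`` is (*left inputs, *right inputs, *left params,
        # *right params), e.g. ``(x1, x2, offset_0, factor_1)``.  This helper rebuilds
        # that positional list when some parameters were supplied as keyword arguments.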
if self.op == "&": new_args = list(args[: self.left.n_inputs + self.right.n_inputs]) args_pos = self.left.n_inputs + self.right.n_inputs else: new_args = list(args[: self.left.n_inputs]) args_pos = self.left.n_inputs for param_name in self.param_names: kw_value = kwargs.pop(param_name, None) if kw_value is not None: value = kw_value else: try: value = args[args_pos] except IndexError: raise IndexError("Missing parameter or input") args_pos += 1 new_args.append(value) return new_args, kwargs def _apply_operators_to_value_lists(self, leftval, rightval, **kw): op = self.op if op == "+": return binary_operation(operator.add, leftval, rightval) elif op == "-": return binary_operation(operator.sub, leftval, rightval) elif op == "*": return binary_operation(operator.mul, leftval, rightval) elif op == "/": return binary_operation(operator.truediv, leftval, rightval) elif op == "**": return binary_operation(operator.pow, leftval, rightval) elif op == "&": if not isinstance(leftval, tuple): leftval = (leftval,) if not isinstance(rightval, tuple): rightval = (rightval,) return leftval + rightval elif op in SPECIAL_OPERATORS: return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval) else: raise ModelDefinitionError("Unrecognized operator {op}") def evaluate(self, *args, **kw): op = self.op args, kw = self._get_kwarg_model_parameters_as_positional(args, kw) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) if op == "fix_inputs": pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs))) fixed_inputs = { key if np.issubdtype(type(key), np.integer) else pos_index[key]: value for key, value in self.right.items() } left_inputs = [ fixed_inputs[ind] if ind in fixed_inputs.keys() else inp for ind, inp in enumerate(left_inputs) ] leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params)) if op == "fix_inputs": return leftval right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) if op == "|": if isinstance(leftval, tuple): return self.right.evaluate(*itertools.chain(leftval, right_params)) else: return self.right.evaluate(leftval, *right_params) else: rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params)) return self._apply_operators_to_value_lists(leftval, rightval, **kw) @property def n_submodels(self): if self._leaflist is None: self._make_leaflist() return len(self._leaflist) @property def submodel_names(self): """Return the names of submodels in a ``CompoundModel``.""" if self._leaflist is None: self._make_leaflist() names = [item.name for item in self._leaflist] nonecount = 0 newnames = [] for item in names: if item is None: newnames.append(f"None_{nonecount}") nonecount += 1 else: newnames.append(item) return tuple(newnames) def both_inverses_exist(self): """ if both members of this compound model have inverses return True """ import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( "CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.", AstropyDeprecationWarning, ) try: self.left.inverse self.right.inverse except NotImplementedError: return False return True def _pre_evaluate(self, *args, **kwargs): """ CompoundModel specific input setup that needs to occur prior to model evaluation. Note ---- All of the _pre_evaluate for each component model will be performed at the time that the individual model is evaluated. 
""" # If equivalencies are provided, necessary to map parameters and pass # the leaflist as a keyword input for use by model evaluation so that # the compound model input names can be matched to the model input # names. if "equivalencies" in kwargs: # Restructure to be useful for the individual model lookup kwargs["inputs_map"] = [ (value[0], (value[1], key)) for key, value in self.inputs_map().items() ] # Setup actual model evaluation method def evaluate(_inputs): return self._evaluate(*_inputs, **kwargs) return evaluate, args, None, kwargs @property def _argnames(self): """ No inputs should be used to determine input_shape when handling compound models """ return () def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ CompoundModel specific post evaluation processing of outputs Note ---- All of the _post_evaluate for each component model will be performed at the time that the individual model is evaluated. """ if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1: return outputs[0] return outputs def _evaluate(self, *args, **kw): op = self.op if op != "fix_inputs": if op != "&": leftval = self.left(*args, **kw) if op != "|": rightval = self.right(*args, **kw) else: rightval = None else: leftval = self.left(*(args[: self.left.n_inputs]), **kw) rightval = self.right(*(args[self.left.n_inputs :]), **kw) if op != "|": return self._apply_operators_to_value_lists(leftval, rightval, **kw) elif op == "|": if isinstance(leftval, tuple): return self.right(*leftval, **kw) else: return self.right(leftval, **kw) else: subs = self.right newargs = list(args) subinds = [] subvals = [] for key in subs.keys(): if np.issubdtype(type(key), np.integer): subinds.append(key) elif isinstance(key, str): ind = self.left.inputs.index(key) subinds.append(ind) subvals.append(subs[key]) # Turn inputs specified in kw into positional indices. # Names for compound inputs do not propagate to sub models. kwind = [] kwval = [] for kwkey in list(kw.keys()): if kwkey in self.inputs: ind = self.inputs.index(kwkey) if ind < len(args): raise ValueError( "Keyword argument duplicates positional value supplied." ) kwind.append(ind) kwval.append(kw[kwkey]) del kw[kwkey] # Build new argument list # Append keyword specified args first if kwind: kwargs = list(zip(kwind, kwval)) kwargs.sort() kwindsorted, kwvalsorted = list(zip(*kwargs)) newargs = newargs + list(kwvalsorted) if subinds: subargs = list(zip(subinds, subvals)) subargs.sort() # subindsorted, subvalsorted = list(zip(*subargs)) # The substitutions must be inserted in order for ind, val in subargs: newargs.insert(ind, val) return self.left(*newargs, **kw) @property def param_names(self): """An ordered list of parameter names.""" return self._param_names def _make_leaflist(self): tdict = {} leaflist = [] make_subtree_dict(self, "", tdict, leaflist) self._leaflist = leaflist self._tdict = tdict def __getattr__(self, name): """ If someone accesses an attribute not already defined, map the parameters, and then see if the requested attribute is one of the parameters """ # The following test is needed to avoid infinite recursion # caused by deepcopy. There may be other such cases discovered. 
if name == "__setstate__": raise AttributeError if name in self._param_names: return self.__dict__[name] else: raise AttributeError(f'Attribute "{name}" not found') def __getitem__(self, index): if self._leaflist is None: self._make_leaflist() leaflist = self._leaflist tdict = self._tdict if isinstance(index, slice): if index.step: raise ValueError("Steps in slices not supported for compound models") if index.start is not None: if isinstance(index.start, str): start = self._str_index_to_int(index.start) else: start = index.start else: start = 0 if index.stop is not None: if isinstance(index.stop, str): stop = self._str_index_to_int(index.stop) else: stop = index.stop - 1 else: stop = len(leaflist) - 1 if index.stop == 0: raise ValueError("Slice endpoint cannot be 0") if start < 0: start = len(leaflist) + start if stop < 0: stop = len(leaflist) + stop # now search for matching node: if stop == start: # only single value, get leaf instead in code below index = start else: for key in tdict: node, leftind, rightind = tdict[key] if leftind == start and rightind == stop: return node raise IndexError("No appropriate subtree matches slice") if np.issubdtype(type(index), np.integer): return leaflist[index] elif isinstance(index, str): return leaflist[self._str_index_to_int(index)] else: raise TypeError("index must be integer, slice, or model name string") def _str_index_to_int(self, str_index): # Search through leaflist for item with that name found = [] for nleaf, leaf in enumerate(self._leaflist): if getattr(leaf, "name", None) == str_index: found.append(nleaf) if len(found) == 0: raise IndexError(f"No component with name '{str_index}' found") if len(found) > 1: raise IndexError( f"Multiple components found using '{str_index}' as name\n" f"at indices {found}" ) return found[0] @property def n_inputs(self): """The number of inputs of a model.""" return self._n_inputs @n_inputs.setter def n_inputs(self, value): self._n_inputs = value @property def n_outputs(self): """The number of outputs of a model.""" return self._n_outputs @n_outputs.setter def n_outputs(self, value): self._n_outputs = value @property def eqcons(self): return self._eqcons @eqcons.setter def eqcons(self, value): self._eqcons = value @property def ineqcons(self): return self._eqcons @ineqcons.setter def ineqcons(self, value): self._eqcons = value def traverse_postorder(self, include_operator=False): """Postorder traversal of the CompoundModel tree.""" res = [] if isinstance(self.left, CompoundModel): res = res + self.left.traverse_postorder(include_operator) else: res = res + [self.left] if isinstance(self.right, CompoundModel): res = res + self.right.traverse_postorder(include_operator) else: res = res + [self.right] if include_operator: res.append(self.op) else: res.append(self) return res def _format_expression(self, format_leaf=None): leaf_idx = 0 operands = deque() if format_leaf is None: format_leaf = lambda i, l: f"[{i}]" for node in self.traverse_postorder(): if not isinstance(node, CompoundModel): operands.append(format_leaf(leaf_idx, node)) leaf_idx += 1 continue right = operands.pop() left = operands.pop() if node.op in OPERATOR_PRECEDENCE: oper_order = OPERATOR_PRECEDENCE[node.op] if isinstance(node, CompoundModel): if ( isinstance(node.left, CompoundModel) and OPERATOR_PRECEDENCE[node.left.op] < oper_order ): left = f"({left})" if ( isinstance(node.right, CompoundModel) and OPERATOR_PRECEDENCE[node.right.op] < oper_order ): right = f"({right})" operands.append(" ".join((left, node.op, right))) else: left = 
f"(({left})," right = f"({right}))" operands.append(" ".join((node.op[0], left, right))) return "".join(operands) def _format_components(self): if self._parameters_ is None: self._map_parameters() return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist)) def __str__(self): expression = self._format_expression() components = self._format_components() keywords = [ ("Expression", expression), ("Components", "\n" + indent(components)), ] return super()._format_str(keywords=keywords) def rename(self, name): self.name = name return self @property def isleaf(self): return False @property def inverse(self): if self.op == "|": return self.right.inverse | self.left.inverse elif self.op == "&": return self.left.inverse & self.right.inverse else: return NotImplemented @property def fittable(self): """Set the fittable attribute on a compound model.""" if self._fittable is None: if self._leaflist is None: self._map_parameters() self._fittable = all(m.fittable for m in self._leaflist) return self._fittable __add__ = _model_oper("+") __sub__ = _model_oper("-") __mul__ = _model_oper("*") __truediv__ = _model_oper("/") __pow__ = _model_oper("**") __or__ = _model_oper("|") __and__ = _model_oper("&") def _map_parameters(self): """ Map all the constituent model parameters to the compound object, renaming as necessary by appending a suffix number. This can be an expensive operation, particularly for a complex expression tree. All the corresponding parameter attributes are created that one expects for the Model class. The parameter objects that the attributes point to are the same objects as in the constiutent models. Changes made to parameter values to either are seen by both. Prior to calling this, none of the associated attributes will exist. This method must be called to make the model usable by fitting engines. If oldnames=True, then parameters are named as in the original implementation of compound models. """ if self._parameters is not None: # do nothing return if self._leaflist is None: self._make_leaflist() self._parameters_ = {} param_map = {} self._param_names = [] for lindex, leaf in enumerate(self._leaflist): if not isinstance(leaf, dict): for param_name in leaf.param_names: param = getattr(leaf, param_name) new_param_name = f"{param_name}_{lindex}" self.__dict__[new_param_name] = param self._parameters_[new_param_name] = param self._param_names.append(new_param_name) param_map[new_param_name] = (lindex, param_name) self._param_metrics = {} self._param_map = param_map self._param_map_inverse = {v: k for k, v in param_map.items()} self._initialize_slices() self._param_names = tuple(self._param_names) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name] = {} param_metrics[name]["slice"] = param_slice param_metrics[name]["shape"] = param_shape param_metrics[name]["size"] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) @staticmethod def _recursive_lookup(branch, adict, key): if isinstance(branch, CompoundModel): return adict[key] return branch, key def inputs_map(self): """ Map the names of the inputs to this ExpressionTree to the inputs to the leaf models. 
""" inputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {inp: (self, inp) for inp in self.inputs} elif self.op == "|": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp elif self.op == "&": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() if isinstance(self.right, CompoundModel): r_inputs_map = self.right.inputs_map() for i, inp in enumerate(self.inputs): if i < len(self.left.inputs): # Get from left if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[self.left.inputs[i]] else: inputs_map[inp] = self.left, self.left.inputs[i] else: # Get from right if isinstance(self.right, CompoundModel): inputs_map[inp] = r_inputs_map[ self.right.inputs[i - len(self.left.inputs)] ] else: inputs_map[inp] = ( self.right, self.right.inputs[i - len(self.left.inputs)], ) elif self.op == "fix_inputs": fixed_ind = list(self.right.keys()) ind = [ list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind ] inp_ind = list(range(self.left.n_inputs)) for i in ind: inp_ind.remove(i) for i in inp_ind: inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i] else: if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.left.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp return inputs_map def _parameter_units_for_data_units(self, input_units, output_units): if self._leaflist is None: self._map_parameters() units_for_data = {} for imodel, model in enumerate(self._leaflist): units_for_data_leaf = model._parameter_units_for_data_units( input_units, output_units ) for param_leaf in units_for_data_leaf: param = self._param_map_inverse[(imodel, param_leaf)] units_for_data[param] = units_for_data_leaf[param_leaf] return units_for_data @property def input_units(self): inputs_map = self.inputs_map() input_units_dict = { key: inputs_map[key][0].input_units[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units is not None } if input_units_dict: return input_units_dict return None @property def input_units_equivalencies(self): inputs_map = self.inputs_map() input_units_equivalencies_dict = { key: inputs_map[key][0].input_units_equivalencies[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units_equivalencies is not None } if not input_units_equivalencies_dict: return None return input_units_equivalencies_dict @property def input_units_allow_dimensionless(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_allow_dimensionless[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def input_units_strict(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_strict[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def return_units(self): outputs_map = self.outputs_map() return { key: outputs_map[key][0].return_units[orig_key] for key, (mod, orig_key) in outputs_map.items() if outputs_map[key][0].return_units is not None } def outputs_map(self): """ Map the names of the outputs to this ExpressionTree to the outputs to the leaf models. 
""" outputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {out: (self, out) for out in self.outputs} elif self.op == "|": if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for out in self.outputs: if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[out] else: outputs_map[out] = self.right, out elif self.op == "&": if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for i, out in enumerate(self.outputs): if i < len(self.left.outputs): # Get from left if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map[self.left.outputs[i]] else: outputs_map[out] = self.left, self.left.outputs[i] else: # Get from right if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[ self.right.outputs[i - len(self.left.outputs)] ] else: outputs_map[out] = ( self.right, self.right.outputs[i - len(self.left.outputs)], ) elif self.op == "fix_inputs": return self.left.outputs_map() else: if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() for out in self.left.outputs: if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map()[out] else: outputs_map[out] = self.left, out return outputs_map @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = self.get_bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, important when using # add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out def replace_submodel(self, name, model): """ Construct a new `~astropy.modeling.CompoundModel` instance from an existing CompoundModel, replacing the named submodel with a new model. In order to ensure that inverses and names are kept/reconstructed, it's necessary to rebuild the CompoundModel from the replaced node all the way back to the base. The original CompoundModel is left untouched. Parameters ---------- name : str name of submodel to be replaced model : `~astropy.modeling.Model` replacement model """ submodels = [ m for m in self.traverse_postorder() if getattr(m, "name", None) == name ] if submodels: if len(submodels) > 1: raise ValueError(f"More than one submodel named {name}") old_model = submodels.pop() if len(old_model) != len(model): raise ValueError( "New and old models must have equal values for n_models" ) # Do this check first in order to raise a more helpful Exception, # although it would fail trying to construct the new CompoundModel if ( old_model.n_inputs != model.n_inputs or old_model.n_outputs != model.n_outputs ): raise ValueError( "New model must match numbers of inputs and " "outputs of existing model" ) tree = _get_submodel_path(self, name) while tree: branch = self.copy() for node in tree[:-1]: branch = getattr(branch, node) setattr(branch, tree[-1], model) model = CompoundModel( branch.op, branch.left, branch.right, name=branch.name ) tree = tree[:-1] return model else: raise ValueError(f"No submodels found named {name}") def _set_sub_models_and_parameter_units(self, left, right): """ Provides a work-around to properly set the sub models and respective parameters's units/values when using ``without_units_for_data`` or ``without_units_for_data`` methods. 
""" model = CompoundModel(self.op, left, right) self.left = left self.right = right for name in model.param_names: model_parameter = getattr(model, name) parameter = getattr(self, name) parameter.value = model_parameter.value parameter._set_unit(model_parameter.unit, force=True) def without_units_for_data(self, **kwargs): """ See `~astropy.modeling.Model.without_units_for_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. It does this by modifying the output units of each sub model by using the output units of the other sub model so that we can apply the original function and get the desired result. Additional data has to be output in the mixed output unit case so that the units can be properly rebuilt by `~astropy.modeling.CompoundModel.with_units_from_data`. Outside the mixed output units, this method is identical to the base method. """ if self.op in ["*", "/"]: model = self.copy() inputs = {inp: kwargs[inp] for inp in self.inputs} left_units = self.left.output_units(**kwargs) right_units = self.right.output_units(**kwargs) if self.op == "*": left_kwargs = { out: kwargs[out] / right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: kwargs[out] / left_units[out] for out in self.right.outputs if kwargs[out] is not None } else: left_kwargs = { out: kwargs[out] * right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: 1 / kwargs[out] * left_units[out] for out in self.right.outputs if kwargs[out] is not None } left_kwargs.update(inputs.copy()) right_kwargs.update(inputs.copy()) left = self.left.without_units_for_data(**left_kwargs) if isinstance(left, tuple): left_kwargs["_left_kwargs"] = left[1] left_kwargs["_right_kwargs"] = left[2] left = left[0] right = self.right.without_units_for_data(**right_kwargs) if isinstance(right, tuple): right_kwargs["_left_kwargs"] = right[1] right_kwargs["_right_kwargs"] = right[2] right = right[0] model._set_sub_models_and_parameter_units(left, right) return model, left_kwargs, right_kwargs else: return super().without_units_for_data(**kwargs) def with_units_from_data(self, **kwargs): """ See `~astropy.modeling.Model.with_units_from_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. In order to do this it requires some additional information output by `~astropy.modeling.CompoundModel.without_units_for_data` passed as keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``. Outside the mixed output units, this method is identical to the base method. 
""" if self.op in ["*", "/"]: left_kwargs = kwargs.pop("_left_kwargs") right_kwargs = kwargs.pop("_right_kwargs") left = self.left.with_units_from_data(**left_kwargs) right = self.right.with_units_from_data(**right_kwargs) model = self.copy() model._set_sub_models_and_parameter_units(left, right) return model else: return super().with_units_from_data(**kwargs) def _get_submodel_path(model, name): """Find the route down a CompoundModel's tree to the model with the specified name (whether it's a leaf or not)""" if getattr(model, "name", None) == name: return [] try: return ["left"] + _get_submodel_path(model.left, name) except (AttributeError, TypeError): pass try: return ["right"] + _get_submodel_path(model.right, name) except (AttributeError, TypeError): pass def binary_operation(binoperator, left, right): """ Perform binary operation. Operands may be matching tuples of operands. """ if isinstance(left, tuple) and isinstance(right, tuple): return tuple(binoperator(item[0], item[1]) for item in zip(left, right)) return binoperator(left, right) def get_ops(tree, opset): """ Recursive function to collect operators used. """ if isinstance(tree, CompoundModel): opset.add(tree.op) get_ops(tree.left, opset) get_ops(tree.right, opset) else: return def make_subtree_dict(tree, nodepath, tdict, leaflist): """ Traverse a tree noting each node by a key that indicates all the left/right choices necessary to reach that node. Each key will reference a tuple that contains: - reference to the compound model for that node. - left most index contained within that subtree (relative to all indices for the whole tree) - right most index contained within that subtree """ # if this is a leaf, just append it to the leaflist if not hasattr(tree, "isleaf"): leaflist.append(tree) else: leftmostind = len(leaflist) make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist) make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist) rightmostind = len(leaflist) - 1 tdict[nodepath] = (tree, leftmostind, rightmostind) _ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)] OPERATOR_PRECEDENCE = {} for idx, ops in enumerate(_ORDER_OF_OPERATORS): for op in ops: OPERATOR_PRECEDENCE[op] = idx del idx, op, ops def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None): """ This function creates a compound model with one or more of the input values of the input model assigned fixed values (scalar or array). Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that one or more of the model input values will be fixed to some constant value. values : dict A dictionary where the key identifies which input to fix and its value is the value to fix it at. The key may either be the name of the input or a number reflecting its order in the inputs. 
Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> g = Gaussian2D(1, 2, 3, 4, 5) >>> gv = fix_inputs(g, {0: 2.5}) Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) """ model = CompoundModel("fix_inputs", modelinstance, values) if bounding_boxes is not None: if selector_args is None: selector_args = tuple((key, True) for key in values.keys()) bbox = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args ) _selector = bbox.selector_args.get_fixed_values(modelinstance, values) new_bbox = bbox[_selector] new_bbox = new_bbox.__class__.validate(model, new_bbox) model.bounding_box = new_bbox return model def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"): """ Set a validated bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated bounding box will be set on. bounding_box : tuple A bounding box tuple, see :ref:`astropy:bounding-boxes` for details ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = ModelBoundingBox.validate( modelinstance, bounding_box, ignored=ignored, order=order ) def bind_compound_bounding_box( modelinstance, bounding_boxes, selector_args, create_selector=None, ignored=None, order="C", ): """ Add a validated compound bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated compound bounding box will be set on. bounding_boxes : dict A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes` for details. selector_args : list List of selector argument tuples to define selection for compound bounding box, see :ref:`astropy:bounding-boxes` for details. create_selector : callable, optional An optional callable with interface (selector_value, model) which can generate a bounding box based on a selector value and model if there is no bounding box in the compound bounding box listed under that selector value. Default is ``None``, meaning new bounding box entries will not be automatically generated. ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args, create_selector=create_selector, ignored=ignored, order=order, ) def custom_model(*args, fit_deriv=None): """ Create a model from a user defined function. The inputs and parameters of the model will be inferred from the arguments of the function. This can be used either as a function or as a decorator. See below for examples of both usages. The model is separable only if there is a single input. .. note:: All model parameters have to be defined as keyword arguments with default values in the model function. Use `None` as a default argument value if you do not want to have a default value for that parameter. The standard settable model properties can be configured by default using keyword arguments matching the name of the property; however, these values are not set as model "parameters". Moreover, users cannot use keyword arguments matching non-settable model properties, with the exception of ``n_outputs`` which should be set to the number of outputs of your function. 
Parameters ---------- func : function Function which defines the model. It should take N positional arguments where ``N`` is dimensions of the model (the number of independent variable in the model), and any number of keyword arguments (the parameters). It must return the value of the model (typically as an array, but can also be a scalar for scalar inputs). This corresponds to the `~astropy.modeling.Model.evaluate` method. fit_deriv : function, optional Function which defines the Jacobian derivative of the model. I.e., the derivative with respect to the *parameters* of the model. It should have the same argument signature as ``func``, but should return a sequence where each element of the sequence is the derivative with respect to the corresponding argument. This corresponds to the :meth:`~astropy.modeling.FittableModel.fit_deriv` method. Examples -------- Define a sinusoidal model function as a custom 1D model:: >>> from astropy.modeling.models import custom_model >>> import numpy as np >>> def sine_model(x, amplitude=1., frequency=1.): ... return amplitude * np.sin(2 * np.pi * frequency * x) >>> def sine_deriv(x, amplitude=1., frequency=1.): ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x) >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv) Create an instance of the custom model and evaluate it:: >>> model = SineModel() >>> model(0.25) 1.0 This model instance can now be used like a usual astropy model. The next example demonstrates a 2D Moffat function model, and also demonstrates the support for docstrings (this example could also include a derivative, but it has been omitted for simplicity):: >>> @custom_model ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0, ... alpha=1.0): ... \"\"\"Two dimensional Moffat function.\"\"\" ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 ... return amplitude * (1 + rr_gg) ** (-alpha) ... >>> print(Moffat2D.__doc__) Two dimensional Moffat function. >>> model = Moffat2D() >>> model(1, 1) # doctest: +FLOAT_CMP 0.3333333333333333 """ if len(args) == 1 and callable(args[0]): return _custom_model_wrapper(args[0], fit_deriv=fit_deriv) elif not args: return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv) else: raise TypeError( f"{__name__} takes at most one positional argument (the callable/" "function to be turned into a model. When used as a decorator " "it should be passed keyword arguments only (if " "any)." ) def _custom_model_inputs(func): """ Processes the inputs to the `custom_model`'s function into the appropriate categories. 
Parameters ---------- func : callable Returns ------- inputs : list list of evaluation inputs special_params : dict dictionary of model properties which require special treatment settable_params : dict dictionary of defaults for settable model properties params : dict dictionary of model parameters set by `custom_model`'s function """ inputs, parameters = get_inputs_and_params(func) special = ["n_outputs"] settable = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is not None ] properties = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is None and attr not in special ] special_params = {} settable_params = {} params = {} for param in parameters: if param.name in special: special_params[param.name] = param.default elif param.name in settable: settable_params[param.name] = param.default elif param.name in properties: raise ValueError( f"Parameter '{param.name}' cannot be a model property: {properties}." ) else: params[param.name] = param.default return inputs, special_params, settable_params, params def _custom_model_wrapper(func, fit_deriv=None): """ Internal implementation `custom_model`. When `custom_model` is called as a function its arguments are passed to this function, and the result of this function is returned. When `custom_model` is used as a decorator a partial evaluation of this function is returned by `custom_model`. """ if not callable(func): raise ModelDefinitionError( "func is not callable; it must be a function or other callable object" ) if fit_deriv is not None and not callable(fit_deriv): raise ModelDefinitionError( "fit_deriv not callable; it must be a function or other callable object" ) model_name = func.__name__ inputs, special_params, settable_params, params = _custom_model_inputs(func) if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params): raise ModelDefinitionError( "derivative function should accept same number of parameters as func." ) params = { param: Parameter(param, default=default) for param, default in params.items() } mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = "__main__" members = { "__module__": str(modname), "__doc__": func.__doc__, "n_inputs": len(inputs), "n_outputs": special_params.pop("n_outputs", 1), "evaluate": staticmethod(func), "_settable_properties": settable_params, } if fit_deriv is not None: members["fit_deriv"] = staticmethod(fit_deriv) members.update(params) cls = type(model_name, (FittableModel,), members) cls._separable = True if (len(inputs) == 1) else False return cls def render_model(model, arr=None, coords=None): """ Evaluates a model on an input array. Evaluation is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- model : `Model` Model to be evaluated. arr : `numpy.ndarray`, optional Array on which the model is evaluated. coords : array-like, optional Coordinate arrays mapping to ``arr``, such that ``arr[coords] == arr``. Returns ------- array : `numpy.ndarray` The model evaluated on the input ``arr`` or a new array from ``coords``. If ``arr`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = model.bounding_box if (coords is None) & (arr is None) & (bbox is None): raise ValueError("If no bounding_box is set, coords or arr must be input.") # for consistent indexing if model.n_inputs == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if arr is not None: arr = arr.copy() # Check dimensions match model if arr.ndim != model.n_inputs: raise ValueError( "number of array dimensions inconsistent with number of model inputs." ) if coords is not None: # Check dimensions match arr and model coords = np.array(coords) if len(coords) != model.n_inputs: raise ValueError( "coordinate length inconsistent with the number of model inputs." ) if arr is not None: if coords[0].shape != arr.shape: raise ValueError("coordinate shape inconsistent with the array shape.") else: arr = np.zeros(coords[0].shape) if bbox is not None: # assures position is at center pixel, important when using add_array pd = pos, delta = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if arr is None: arr = model(*sub_coords) else: try: arr = add_array(arr, model(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input" " arr in one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = arr.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] arr += model(*coords[::-1]) return arr def hide_inverse(model): """ This is a convenience function intended to disable automatic generation of the inverse in compound models by disabling one of the constituent model's inverse. This is to handle cases where user provided inverse functions are not compatible within an expression. Example: compound_model.inverse = hide_inverse(m1) + m2 + m3 This will insure that the defined inverse itself won't attempt to build its own inverse, which would otherwise fail in this example (e.g., m = m1 + m2 + m3 happens to raises an exception for this reason.) Note that this permanently disables it. To prevent that either copy the model or restore the inverse later. """ del model.inverse return model
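# Illustrative sketch (added; not part of the upstream module): typical use of two of
# the public helpers defined above.  The model and the numeric limits are hypothetical;
# for a 2-input model with the default ``order='C'`` the bounding box is assumed to be
# given as ``((y_low, y_high), (x_low, x_high))``.
#
#     >>> from astropy.modeling.models import Gaussian2D
#     >>> g = Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=1, y_stddev=1)
#     >>> g1d = fix_inputs(g, {"y": 5.0})   # freeze the ``y`` input; g1d(x) == g(x, y=5)
#     >>> g1d(5.0)                          # doctest: +FLOAT_CMP
#     1.0
#     >>> bind_bounding_box(g, ((3, 7), (2, 8)))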
454dcc5014d83e368796e6a1531a4b66221dc2ab05ca92ed4aee54aa6bd065e8
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Convolution Model""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import numpy as np from .core import CompoundModel class Convolution(CompoundModel): """ Wrapper class for a convolution model. Parameters ---------- operator: tuple The SPECIAL_OPERATORS entry for the convolution being used. model : Model The model for the convolution. kernel: Model The kernel model for the convolution. bounding_box : tuple A bounding box to define the limits of the integration approximation for the convolution. resolution : float The resolution for the approximation of the convolution. cache : bool, optional Allow convolution computation to be cached for reuse. This is enabled by default. Notes ----- This is wrapper is necessary to handle the limitations of the pseudospectral convolution binary operator implemented in astropy.convolution under `~astropy.convolution.convolve_fft`. In this `~astropy.convolution.convolve_fft` it is assumed that the inputs ``array`` and ``kernel`` span a sufficient portion of the support of the functions of the convolution. Consequently, the ``Compound`` created by the `~astropy.convolution.convolve_models` function makes the assumption that one should pass an input array that sufficiently spans this space. This means that slightly different input arrays to this model will result in different outputs, even on points of intersection between these arrays. This issue is solved by requiring a ``bounding_box`` together with a resolution so that one can pre-calculate the entire domain and then (by default) cache the convolution values. The function then just interpolates the results from this cache. """ def __init__(self, operator, model, kernel, bounding_box, resolution, cache=True): super().__init__(operator, model, kernel) self.bounding_box = bounding_box self._resolution = resolution self._cache_convolution = cache self._kwargs = None self._convolution = None def clear_cache(self): """ Clears the cached convolution """ self._kwargs = None self._convolution = None def _get_convolution(self, **kwargs): if (self._convolution is None) or (self._kwargs != kwargs): domain = self.bounding_box.domain(self._resolution) mesh = np.meshgrid(*domain) data = super().__call__(*mesh, **kwargs) from scipy.interpolate import RegularGridInterpolator convolution = RegularGridInterpolator(domain, data) if self._cache_convolution: self._kwargs = kwargs self._convolution = convolution else: convolution = self._convolution return convolution @staticmethod def _convolution_inputs(*args): not_scalar = np.where([not np.isscalar(arg) for arg in args])[0] if len(not_scalar) == 0: return np.array(args), (1,) else: output_shape = args[not_scalar[0]].shape if not all(args[index].shape == output_shape for index in not_scalar): raise ValueError("Values have differing shapes") inputs = [] for arg in args: if np.isscalar(arg): inputs.append(np.full(output_shape, arg)) else: inputs.append(arg) return np.reshape(inputs, (len(inputs), -1)).T, output_shape @staticmethod def _convolution_outputs(outputs, output_shape): return outputs.reshape(output_shape) def __call__(self, *args, **kw): inputs, output_shape = self._convolution_inputs(*args) convolution = self._get_convolution(**kw) outputs = convolution(inputs) return self._convolution_outputs(outputs, output_shape)
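# Illustrative sketch (added; not part of the upstream module): a ``Convolution``
# instance is normally obtained through ``astropy.convolution.convolve_models_fft``,
# which supplies the operator, bounding box and resolution accepted by ``__init__``
# above.  Requires scipy for the interpolation step; the bounding box and resolution
# values below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy.convolution import convolve_models_fft
#     >>> from astropy.modeling.models import Gaussian1D
#     >>> model = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     >>> kernel = Gaussian1D(amplitude=1, mean=0, stddev=0.5)
#     >>> convolved = convolve_models_fft(model, kernel, (-5, 5), 0.01)
#     >>> y = convolved(np.linspace(-3, 3, 61))   # evaluated via the cached grid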
32650f299da513a7ce971d1541c8cce546e7d417854f9bfb9a9d8a8524b281da
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Creates a common namespace for all pre-defined models. """ from . import math_functions as math from .core import custom_model, fix_inputs, hide_inverse from .functional_models import * from .mappings import * from .physical_models import * from .polynomial import * from .powerlaws import * from .projections import * from .rotations import * from .spline import * from .tabular import * # Attach a docstring explaining constraints to all models which support them. # Note: add new models to this list CONSTRAINTS_DOC = """ Other Parameters ---------------- fixed : a dict, optional A dictionary ``{parameter_name: boolean}`` of parameters to not be varied during fitting. True means the parameter is held fixed. Alternatively the `~astropy.modeling.Parameter.fixed` property of a parameter may be used. tied : dict, optional A dictionary ``{parameter_name: callable}`` of parameters which are linked to some other parameter. The dictionary values are callables providing the linking relationship. Alternatively the `~astropy.modeling.Parameter.tied` property of a parameter may be used. bounds : dict, optional A dictionary ``{parameter_name: value}`` of lower and upper bounds of parameters. Keys are parameter names. Values are a list or a tuple of length 2 giving the desired range for the parameter. Alternatively, the `~astropy.modeling.Parameter.min` and `~astropy.modeling.Parameter.max` properties of a parameter may be used. eqcons : list, optional A list of functions of length ``n`` such that ``eqcons[j](x0,*args) == 0.0`` in a successfully optimized problem. ineqcons : list, optional A list of functions of length ``n`` such that ``ieqcons[j](x0,*args) >= 0.0`` is a successfully optimized problem. """ MODELS_WITH_CONSTRAINTS = [ AiryDisk2D, Moffat1D, Moffat2D, Box1D, Box2D, Const1D, Const2D, Ellipse2D, Disk2D, Gaussian1D, Gaussian2D, Linear1D, Lorentz1D, RickerWavelet1D, RickerWavelet2D, PowerLaw1D, Sersic1D, Sersic2D, Sine1D, Cosine1D, Tangent1D, ArcSine1D, ArcCosine1D, ArcTangent1D, Trapezoid1D, TrapezoidDisk2D, Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre2D, Legendre1D, Polynomial1D, Polynomial2D, Voigt1D, KingProjectedAnalytic1D, NFW, ] for item in MODELS_WITH_CONSTRAINTS: if isinstance(item.__doc__, str): item.__doc__ += CONSTRAINTS_DOC
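# A short sketch of the constraint keywords documented above (``fixed``,
# ``tied``, ``bounds``) applied to a Gaussian1D; the values and the tie
# function are illustrative only.
from astropy.modeling.models import Gaussian1D

def tie_stddev(model):
    # hypothetical tie: stddev follows the mean
    return model.mean / 10

g = Gaussian1D(
    amplitude=10,
    mean=5,
    stddev=0.5,
    fixed={"amplitude": True},      # hold amplitude constant during fitting
    bounds={"mean": (0, 10)},       # keep mean inside [0, 10]
    tied={"stddev": tie_stddev},    # stddev recomputed from mean by the fitter
)

# The same constraints are available through the Parameter properties:
g.mean.min, g.mean.max = 0.0, 10.0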
f0875d6ace542b3ed062eb456bcedb4c0e8da62c7b196285e592e27de54b437e
""" Special models useful for complex compound models where control is needed over which outputs from a source model are mapped to which inputs of a target model. """ # pylint: disable=invalid-name from astropy.units import Quantity from .core import FittableModel, Model __all__ = ["Mapping", "Identity", "UnitsMapping"] class Mapping(FittableModel): """ Allows inputs to be reordered, duplicated or dropped. Parameters ---------- mapping : tuple A tuple of integers representing indices of the inputs to this model to return and in what order to return them. See :ref:`astropy:compound-model-mappings` for more details. n_inputs : int Number of inputs; if `None` (default) then ``max(mapping) + 1`` is used (i.e. the highest input index used in the mapping). name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Raises ------ TypeError Raised when number of inputs is less that ``max(mapping)``. Examples -------- >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1) >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2) >>> model(1, 2) # doctest: +FLOAT_CMP (17.0, 14.2) """ linear = True # FittableModel is non-linear by default def __init__(self, mapping, n_inputs=None, name=None, meta=None): self._inputs = () self._outputs = () if n_inputs is None: self._n_inputs = max(mapping) + 1 else: self._n_inputs = n_inputs self._n_outputs = len(mapping) super().__init__(name=name, meta=meta) self.inputs = tuple("x" + str(idx) for idx in range(self._n_inputs)) self.outputs = tuple("x" + str(idx) for idx in range(self._n_outputs)) self._mapping = mapping self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs @property def mapping(self): """Integers representing indices of the inputs.""" return self._mapping def __repr__(self): if self.name is None: return f"<Mapping({self.mapping})>" return f"<Mapping({self.mapping}, name={self.name!r})>" def evaluate(self, *args): if len(args) != self.n_inputs: name = self.name if self.name is not None else "Mapping" raise TypeError(f"{name} expects {self.n_inputs} inputs; got {len(args)}") result = tuple(args[idx] for idx in self._mapping) if self.n_outputs == 1: return result[0] return result @property def inverse(self): """ A `Mapping` representing the inverse of the current mapping. Raises ------ `NotImplementedError` An inverse does no exist on mappings that drop some of its inputs (there is then no way to reconstruct the inputs that were dropped). """ try: mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs)) except ValueError: raise NotImplementedError( f"Mappings such as {self.mapping} that drop one or more of their inputs" " are not invertible at this time." ) inv = self.__class__(mapping) inv._inputs = self._outputs inv._outputs = self._inputs inv._n_inputs = len(inv._inputs) inv._n_outputs = len(inv._outputs) return inv class Identity(Mapping): """ Returns inputs unchanged. This class is useful in compound models when some of the inputs must be passed unchanged to the next model. 
Parameters ---------- n_inputs : int Specifies the number of inputs this identity model accepts. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Examples -------- Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs:: >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale, ... Identity) >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2) >>> model(1,1) # doctest: +FLOAT_CMP (2.4, 2.0) >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP (1.0, 1.0) """ linear = True # FittableModel is non-linear by default def __init__(self, n_inputs, name=None, meta=None): mapping = tuple(range(n_inputs)) super().__init__(mapping, name=name, meta=meta) def __repr__(self): if self.name is None: return f"<Identity({self.n_inputs})>" return f"<Identity({self.n_inputs}, name={self.name!r})>" @property def inverse(self): """ The inverse transformation. In this case of `Identity`, ``self.inverse is self``. """ return self class UnitsMapping(Model): """ Mapper that operates on the units of the input, first converting to canonical units, then assigning new units without further conversion. Used by Model.coerce_units to support units on otherwise unitless models such as Polynomial1D. Parameters ---------- mapping : tuple A tuple of (input_unit, output_unit) pairs, one per input, matched to the inputs by position. The first element of the each pair is the unit that the model will accept (specify ``dimensionless_unscaled`` to accept dimensionless input). The second element is the unit that the model will return. Specify ``dimensionless_unscaled`` to return dimensionless Quantity, and `None` to return raw values without Quantity. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : dict or bool, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like, optional Free-form metadata to associate with this model. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),)) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ def __init__( self, mapping, input_units_equivalencies=None, input_units_allow_dimensionless=False, name=None, meta=None, ): self._mapping = mapping none_mapping_count = len([m for m in mapping if m[-1] is None]) if none_mapping_count > 0 and none_mapping_count != len(mapping): raise ValueError("If one return unit is None, then all must be None") # These attributes are read and handled by Model self._input_units_strict = True self.input_units_equivalencies = input_units_equivalencies self._input_units_allow_dimensionless = input_units_allow_dimensionless super().__init__(name=name, meta=meta) # Can't invoke this until after super().__init__, since # we need self.inputs and self.outputs to be populated. self._rebuild_units() def _rebuild_units(self): self._input_units = { input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping) } @property def n_inputs(self): return len(self._mapping) @property def n_outputs(self): return len(self._mapping) @property def inputs(self): return super().inputs @inputs.setter def inputs(self, value): super(UnitsMapping, self.__class__).inputs.fset(self, value) self._rebuild_units() @property def outputs(self): return super().outputs @outputs.setter def outputs(self, value): super(UnitsMapping, self.__class__).outputs.fset(self, value) self._rebuild_units() @property def input_units(self): return self._input_units @property def mapping(self): return self._mapping def evaluate(self, *args): result = [] for arg, (_, return_unit) in zip(args, self.mapping): if isinstance(arg, Quantity): value = arg.value else: value = arg if return_unit is None: result.append(value) else: result.append(Quantity(value, return_unit, subok=True)) if self.n_outputs == 1: return result[0] else: return tuple(result) def __repr__(self): if self.name is None: return f"<UnitsMapping({self.mapping})>" else: return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
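# Sketch of the ``Mapping.inverse`` behaviour described above: a mapping that
# merely reorders its inputs is invertible, while one that drops an input
# raises ``NotImplementedError``.
from astropy.modeling.models import Mapping

swap = Mapping((1, 0))             # reorder two inputs
swap(1.0, 2.0)                     # -> (2.0, 1.0)
swap.inverse(2.0, 1.0)             # -> (1.0, 2.0)

drop = Mapping((0,), n_inputs=2)   # keep only the first of two inputs
try:
    drop.inverse
except NotImplementedError:
    pass                           # dropping an input cannot be undone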
418f01364212098c5a43d2c26742c85308ccf21a9a61dbc379ec7cfbd239ff8f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Spline models and fitters.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import abc import functools import warnings import numpy as np from astropy.utils import isiterable from astropy.utils.exceptions import AstropyUserWarning from .core import FittableModel, ModelDefinitionError from .parameters import Parameter __all__ = [ "Spline1D", "SplineInterpolateFitter", "SplineSmoothingFitter", "SplineExactKnotsFitter", "SplineSplrepFitter", ] __doctest_requires__ = {"Spline1D": ["scipy"]} class _Spline(FittableModel): """Base class for spline models""" _knot_names = () _coeff_names = () optional_inputs = {} def __init__( self, knots=None, coeffs=None, degree=None, bounds=None, n_models=None, model_set_axis=None, name=None, meta=None, ): super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) self._user_knots = False self._init_tck(degree) # Hack to allow an optional model argument self._create_optional_inputs() if knots is not None: self._init_spline(knots, coeffs, bounds) elif coeffs is not None: raise ValueError( "If one passes a coeffs vector one needs to also pass knots!" ) @property def param_names(self): """ Coefficient names generated based on the spline's degree and number of knots. """ return tuple(list(self._knot_names) + list(self._coeff_names)) @staticmethod def _optional_arg(arg): return f"_{arg}" def _create_optional_inputs(self): for arg in self.optional_inputs: attribute = self._optional_arg(arg) if hasattr(self, attribute): raise ValueError( f"Optional argument {arg} already exists in this class!" ) else: setattr(self, attribute, None) def _intercept_optional_inputs(self, **kwargs): new_kwargs = kwargs for arg in self.optional_inputs: if arg in kwargs: attribute = self._optional_arg(arg) if getattr(self, attribute) is None: setattr(self, attribute, kwargs[arg]) del new_kwargs[arg] else: raise RuntimeError( f"{arg} has already been set, something has gone wrong!" ) return new_kwargs def evaluate(self, *args, **kwargs): """Extract the optional kwargs passed to call""" optional_inputs = kwargs for arg in self.optional_inputs: attribute = self._optional_arg(arg) if arg in kwargs: # Options passed in optional_inputs[arg] = kwargs[arg] elif getattr(self, attribute) is not None: # No options passed in and Options set optional_inputs[arg] = getattr(self, attribute) setattr(self, attribute, None) else: # No options passed in and No options set optional_inputs[arg] = self.optional_inputs[arg] return optional_inputs def __call__(self, *args, **kwargs): """ Make model callable to model evaluation """ # Hack to allow an optional model argument kwargs = self._intercept_optional_inputs(**kwargs) return super().__call__(*args, **kwargs) def _create_parameter(self, name: str, index: int, attr: str, fixed=False): """ Create a spline parameter linked to an attribute array. 
Parameters ---------- name : str Name for the parameter index : int The index of the parameter in the array attr : str The name for the attribute array fixed : optional, bool If the parameter should be fixed or not """ # Hack to allow parameters and attribute array to freely exchange values # _getter forces reading value from attribute array # _setter forces setting value to attribute array def _getter(value, model: "_Spline", index: int, attr: str): return getattr(model, attr)[index] def _setter(value, model: "_Spline", index: int, attr: str): getattr(model, attr)[index] = value return value getter = functools.partial(_getter, index=index, attr=attr) setter = functools.partial(_setter, index=index, attr=attr) default = getattr(self, attr) param = Parameter( name=name, default=default[index], fixed=fixed, getter=getter, setter=setter ) # setter/getter wrapper for parameters in this case require the # parameter to have a reference back to its parent model param.model = self param.value = default[index] # Add parameter to model self.__dict__[name] = param def _create_parameters(self, base_name: str, attr: str, fixed=False): """ Create a spline parameters linked to an attribute array for all elements in that array Parameters ---------- base_name : str Base name for the parameters attr : str The name for the attribute array fixed : optional, bool If the parameters should be fixed or not """ names = [] for index in range(len(getattr(self, attr))): name = f"{base_name}{index}" names.append(name) self._create_parameter(name, index, attr, fixed) return tuple(names) @abc.abstractmethod def _init_parameters(self): raise NotImplementedError("This needs to be implemented") @abc.abstractmethod def _init_data(self, knots, coeffs, bounds=None): raise NotImplementedError("This needs to be implemented") def _init_spline(self, knots, coeffs, bounds=None): self._init_data(knots, coeffs, bounds) self._init_parameters() # fill _parameters and related attributes self._initialize_parameters((), {}) self._initialize_slices() # Calling this will properly fill the _parameter vector, which is # used directly sometimes without being properly filled. _ = self.parameters def _init_tck(self, degree): self._c = None self._t = None self._degree = degree class Spline1D(_Spline): """ One dimensional Spline Model Parameters ---------- knots : optional Define the knots for the spline. Can be 1) the number of interior knots for the spline, 2) the array of all knots for the spline, or 3) If both bounds are defined, the interior knots for the spline coeffs : optional The array of knot coefficients for the spline degree : optional The degree of the spline. It must be 1 <= degree <= 5, default is 3. bounds : optional The upper and lower bounds of the spline. Notes ----- Much of the functionality of this model is provided by `scipy.interpolate.BSpline` which can be directly accessed via the bspline property. Fitting for this model is provided by wrappers for: `scipy.interpolate.UnivariateSpline`, `scipy.interpolate.InterpolatedUnivariateSpline`, and `scipy.interpolate.LSQUnivariateSpline`. If one fails to define any knots/coefficients, no parameters will be added to this model until a fitter is called. This is because some of the fitters for splines vary the number of parameters and so we cannot define the parameter set until after fitting in these cases. Since parameters are not necessarily known at model initialization, setting model parameters directly via the model interface has been disabled. 
Direct constructors are provided for this model which incorporate the fitting to data directly into model construction. Knot parameters are declared as "fixed" parameters by default to enable the use of other `astropy.modeling` fitters to be used to fit this model. Examples -------- >>> import numpy as np >>> from astropy.modeling.models import Spline1D >>> from astropy.modeling import fitting >>> np.random.seed(42) >>> x = np.linspace(-3, 3, 50) >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50) >>> xs = np.linspace(-3, 3, 1000) A 1D interpolating spline can be fit to data: >>> fitter = fitting.SplineInterpolateFitter() >>> spl = fitter(Spline1D(), x, y) Similarly, a smoothing spline can be fit to data: >>> fitter = fitting.SplineSmoothingFitter() >>> spl = fitter(Spline1D(), x, y, s=0.5) Similarly, a spline can be fit to data using an exact set of interior knots: >>> t = [-1, 0, 1] >>> fitter = fitting.SplineExactKnotsFitter() >>> spl = fitter(Spline1D(), x, y, t=t) """ n_inputs = 1 n_outputs = 1 _separable = True optional_inputs = {"nu": 0} def __init__( self, knots=None, coeffs=None, degree=3, bounds=None, n_models=None, model_set_axis=None, name=None, meta=None, ): super().__init__( knots=knots, coeffs=coeffs, degree=degree, bounds=bounds, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, ) @property def t(self): """ The knots vector """ if self._t is None: return np.concatenate( (np.zeros(self._degree + 1), np.ones(self._degree + 1)) ) else: return self._t @t.setter def t(self, value): if self._t is None: raise ValueError( "The model parameters must be initialized before setting knots." ) elif len(value) == len(self._t): self._t = value else: raise ValueError( "There must be exactly as many knots as previously defined." ) @property def t_interior(self): """ The interior knots """ return self.t[self.degree + 1 : -(self.degree + 1)] @property def c(self): """ The coefficients vector """ if self._c is None: return np.zeros(len(self.t)) else: return self._c @c.setter def c(self, value): if self._c is None: raise ValueError( "The model parameters must be initialized before setting coeffs." ) elif len(value) == len(self._c): self._c = value else: raise ValueError( "There must be exactly as many coeffs as previously defined." ) @property def degree(self): """ The degree of the spline polynomials """ return self._degree @property def _initialized(self): return self._t is not None and self._c is not None @property def tck(self): """ Scipy 'tck' tuple representation """ return (self.t, self.c, self.degree) @tck.setter def tck(self, value): if self._initialized: if value[2] != self.degree: raise ValueError("tck has incompatible degree!") self.t = value[0] self.c = value[1] else: self._init_spline(value[0], value[1]) # Calling this will properly fill the _parameter vector, which is # used directly sometimes without being properly filled. 
_ = self.parameters @property def bspline(self): """ Scipy bspline object representation """ from scipy.interpolate import BSpline return BSpline(*self.tck) @bspline.setter def bspline(self, value): from scipy.interpolate import BSpline if isinstance(value, BSpline): self.tck = value.tck else: self.tck = value @property def knots(self): """ Dictionary of knot parameters """ return [getattr(self, knot) for knot in self._knot_names] @property def user_knots(self): """If the knots have been supplied by the user""" return self._user_knots @user_knots.setter def user_knots(self, value): self._user_knots = value @property def coeffs(self): """ Dictionary of coefficient parameters """ return [getattr(self, coeff) for coeff in self._coeff_names] def _init_parameters(self): self._knot_names = self._create_parameters("knot", "t", fixed=True) self._coeff_names = self._create_parameters("coeff", "c") def _init_bounds(self, bounds=None): if bounds is None: bounds = [None, None] if bounds[0] is None: lower = np.zeros(self._degree + 1) else: lower = np.array([bounds[0]] * (self._degree + 1)) if bounds[1] is None: upper = np.ones(self._degree + 1) else: upper = np.array([bounds[1]] * (self._degree + 1)) if bounds[0] is not None and bounds[1] is not None: self.bounding_box = bounds has_bounds = True else: has_bounds = False return has_bounds, lower, upper def _init_knots(self, knots, has_bounds, lower, upper): if np.issubdtype(type(knots), np.integer): self._t = np.concatenate((lower, np.zeros(knots), upper)) elif isiterable(knots): self._user_knots = True if has_bounds: self._t = np.concatenate((lower, np.array(knots), upper)) else: if len(knots) < 2 * (self._degree + 1): raise ValueError( f"Must have at least {2*(self._degree + 1)} knots." ) self._t = np.array(knots) else: raise ValueError(f"Knots: {knots} must be iterable or value") # check that knots form a viable spline self.bspline def _init_coeffs(self, coeffs=None): if coeffs is None: self._c = np.zeros(len(self._t)) else: self._c = np.array(coeffs) # check that coeffs form a viable spline self.bspline def _init_data(self, knots, coeffs, bounds=None): self._init_knots(knots, *self._init_bounds(bounds)) self._init_coeffs(coeffs) def evaluate(self, *args, **kwargs): """ Evaluate the spline. Parameters ---------- x : (positional) The points where the model is evaluating the spline at nu : optional (kwarg) The derivative of the spline for evaluation, 0 <= nu <= degree + 1. Default: 0. """ kwargs = super().evaluate(*args, **kwargs) x = args[0] if "nu" in kwargs: if kwargs["nu"] > self.degree + 1: raise RuntimeError( "Cannot evaluate a derivative of " f"order higher than {self.degree + 1}" ) return self.bspline(x, **kwargs) def derivative(self, nu=1): """ Create a spline that is the derivative of this one Parameters ---------- nu : int, optional Derivative order, default is 1. """ if nu <= self.degree: bspline = self.bspline.derivative(nu=nu) derivative = Spline1D(degree=bspline.k) derivative.bspline = bspline return derivative else: raise ValueError(f"Must have nu <= {self.degree}") def antiderivative(self, nu=1): """ Create a spline that is an antiderivative of this one Parameters ---------- nu : int, optional Antiderivative order, default is 1. 
Notes ----- Assumes constant of integration is 0 """ if (nu + self.degree) <= 5: bspline = self.bspline.antiderivative(nu=nu) antiderivative = Spline1D(degree=bspline.k) antiderivative.bspline = bspline return antiderivative else: raise ValueError( "Supported splines can have max degree 5, " f"antiderivative degree will be {nu + self.degree}" ) class _SplineFitter(abc.ABC): """ Base Spline Fitter """ def __init__(self): self.fit_info = {"resid": None, "spline": None} def _set_fit_info(self, spline): self.fit_info["resid"] = spline.get_residual() self.fit_info["spline"] = spline @abc.abstractmethod def _fit_method(self, model, x, y, **kwargs): raise NotImplementedError("This has not been implemented for _SplineFitter.") def __call__(self, model, x, y, z=None, **kwargs): model_copy = model.copy() if isinstance(model_copy, Spline1D): if z is not None: raise ValueError("1D model can only have 2 data points.") spline = self._fit_method(model_copy, x, y, **kwargs) else: raise ModelDefinitionError( "Only spline models are compatible with this fitter." ) self._set_fit_info(spline) return model_copy class SplineInterpolateFitter(_SplineFitter): """ Fit an interpolating spline """ def _fit_method(self, model, x, y, **kwargs): weights = kwargs.pop("weights", None) bbox = kwargs.pop("bbox", [None, None]) if model.user_knots: warnings.warn( "The current user specified knots maybe ignored for interpolating data", AstropyUserWarning, ) model.user_knots = False if bbox != [None, None]: model.bounding_box = bbox from scipy.interpolate import InterpolatedUnivariateSpline spline = InterpolatedUnivariateSpline( x, y, w=weights, bbox=bbox, k=model.degree ) model.tck = spline._eval_args return spline class SplineSmoothingFitter(_SplineFitter): """ Fit a smoothing spline """ def _fit_method(self, model, x, y, **kwargs): s = kwargs.pop("s", None) weights = kwargs.pop("weights", None) bbox = kwargs.pop("bbox", [None, None]) if model.user_knots: warnings.warn( "The current user specified knots maybe ignored for smoothing data", AstropyUserWarning, ) model.user_knots = False if bbox != [None, None]: model.bounding_box = bbox from scipy.interpolate import UnivariateSpline spline = UnivariateSpline(x, y, w=weights, bbox=bbox, k=model.degree, s=s) model.tck = spline._eval_args return spline class SplineExactKnotsFitter(_SplineFitter): """ Fit a spline using least-squares regression. """ def _fit_method(self, model, x, y, **kwargs): t = kwargs.pop("t", None) weights = kwargs.pop("weights", None) bbox = kwargs.pop("bbox", [None, None]) if t is not None: if model.user_knots: warnings.warn( "The current user specified knots will be " "overwritten for by knots passed into this function", AstropyUserWarning, ) else: if model.user_knots: t = model.t_interior else: raise RuntimeError("No knots have been provided") if bbox != [None, None]: model.bounding_box = bbox from scipy.interpolate import LSQUnivariateSpline spline = LSQUnivariateSpline(x, y, t, w=weights, bbox=bbox, k=model.degree) model.tck = spline._eval_args return spline class SplineSplrepFitter(_SplineFitter): """ Fit a spline using the `scipy.interpolate.splrep` function interface. 
""" def __init__(self): super().__init__() self.fit_info = {"fp": None, "ier": None, "msg": None} def _fit_method(self, model, x, y, **kwargs): t = kwargs.pop("t", None) s = kwargs.pop("s", None) task = kwargs.pop("task", 0) weights = kwargs.pop("weights", None) bbox = kwargs.pop("bbox", [None, None]) if t is not None: if model.user_knots: warnings.warn( "The current user specified knots will be " "overwritten for by knots passed into this function", AstropyUserWarning, ) else: if model.user_knots: t = model.t_interior if bbox != [None, None]: model.bounding_box = bbox from scipy.interpolate import splrep tck, fp, ier, msg = splrep( x, y, w=weights, xb=bbox[0], xe=bbox[1], k=model.degree, s=s, t=t, task=task, full_output=1, ) model.tck = tck return fp, ier, msg def _set_fit_info(self, spline): self.fit_info["fp"] = spline[0] self.fit_info["ier"] = spline[1] self.fit_info["msg"] = spline[2]
4e359dec38b73818e8d71ab9f694131f8e5e5a66173febaad4c9b5844d0138d5
# Licensed under a 3-clause BSD style license - see LICENSE.rst # pylint: disable=invalid-name """ Optimization algorithms used in `~astropy.modeling.fitting`. """ import abc import warnings import numpy as np from astropy.utils.exceptions import AstropyUserWarning __all__ = ["Optimization", "SLSQP", "Simplex"] # Maximum number of iterations DEFAULT_MAXITER = 100 # Step for the forward difference approximation of the Jacobian DEFAULT_EPS = np.sqrt(np.finfo(float).eps) # Default requested accuracy DEFAULT_ACC = 1e-07 DEFAULT_BOUNDS = (-(10**12), 10**12) class Optimization(metaclass=abc.ABCMeta): """ Base class for optimizers. Parameters ---------- opt_method : callable Implements optimization method Notes ----- The base Optimizer does not support any constraints by default; individual optimizers should explicitly set this list to the specific constraints it supports. """ supported_constraints = [] def __init__(self, opt_method): self._opt_method = opt_method self._maxiter = DEFAULT_MAXITER self._eps = DEFAULT_EPS self._acc = DEFAULT_ACC @property def maxiter(self): """Maximum number of iterations""" return self._maxiter @maxiter.setter def maxiter(self, val): """Set maxiter""" self._maxiter = val @property def eps(self): """Step for the forward difference approximation of the Jacobian""" return self._eps @eps.setter def eps(self, val): """Set eps value""" self._eps = val @property def acc(self): """Requested accuracy""" return self._acc @acc.setter def acc(self, val): """Set accuracy""" self._acc = val def __repr__(self): fmt = f"{self.__class__.__name__}()" return fmt @property def opt_method(self): """Return the optimization method.""" return self._opt_method @abc.abstractmethod def __call__(self): raise NotImplementedError("Subclasses should implement this method") class SLSQP(Optimization): """ Sequential Least Squares Programming optimization algorithm. The algorithm is described in [1]_. It supports tied and fixed parameters, as well as bounded constraints. Uses `scipy.optimize.fmin_slsqp`. References ---------- .. [1] http://www.netlib.org/toms/733 """ supported_constraints = ["bounds", "eqcons", "ineqcons", "fixed", "tied"] def __init__(self): from scipy.optimize import fmin_slsqp super().__init__(fmin_slsqp) self.fit_info = { "final_func_val": None, "numiter": None, "exit_mode": None, "message": None, } def __call__(self, objfunc, initval, fargs, **kwargs): """ Run the solver. 
Parameters ---------- objfunc : callable objection function initval : iterable initial guess for the parameter values fargs : tuple other arguments to be passed to the statistic function kwargs : dict other keyword arguments to be passed to the solver """ kwargs["iter"] = kwargs.pop("maxiter", self._maxiter) if "epsilon" not in kwargs: kwargs["epsilon"] = self._eps if "acc" not in kwargs: kwargs["acc"] = self._acc # Get the verbosity level disp = kwargs.pop("verblevel", None) # set the values of constraints to match the requirements of fmin_slsqp model = fargs[0] pars = [getattr(model, name) for name in model.param_names] bounds = [par.bounds for par in pars if not (par.fixed or par.tied)] bounds = np.asarray(bounds) for i in bounds: if i[0] is None: i[0] = DEFAULT_BOUNDS[0] if i[1] is None: i[1] = DEFAULT_BOUNDS[1] # older versions of scipy require this array to be float bounds = np.asarray(bounds, dtype=float) eqcons = np.array(model.eqcons) ineqcons = np.array(model.ineqcons) fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method( objfunc, initval, args=fargs, full_output=True, disp=disp, bounds=bounds, eqcons=eqcons, ieqcons=ineqcons, **kwargs, ) self.fit_info["final_func_val"] = final_func_val self.fit_info["numiter"] = numiter self.fit_info["exit_mode"] = exit_mode self.fit_info["message"] = mess if exit_mode != 0: warnings.warn( "The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning, ) return fitparams, self.fit_info class Simplex(Optimization): """ Neald-Mead (downhill simplex) algorithm. This algorithm [1]_ only uses function values, not derivatives. Uses `scipy.optimize.fmin`. References ---------- .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function minimization", The Computer Journal, 7, pp. 308-313 """ supported_constraints = ["bounds", "fixed", "tied"] def __init__(self): from scipy.optimize import fmin as simplex super().__init__(simplex) self.fit_info = { "final_func_val": None, "numiter": None, "exit_mode": None, "num_function_calls": None, } def __call__(self, objfunc, initval, fargs, **kwargs): """ Run the solver. Parameters ---------- objfunc : callable objection function initval : iterable initial guess for the parameter values fargs : tuple other arguments to be passed to the statistic function kwargs : dict other keyword arguments to be passed to the solver """ if "maxiter" not in kwargs: kwargs["maxiter"] = self._maxiter if "acc" in kwargs: self._acc = kwargs["acc"] kwargs.pop("acc") if "xtol" in kwargs: self._acc = kwargs["xtol"] kwargs.pop("xtol") # Get the verbosity level disp = kwargs.pop("verblevel", None) fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method( objfunc, initval, args=fargs, xtol=self._acc, disp=disp, full_output=True, **kwargs, ) self.fit_info["final_func_val"] = final_func_val self.fit_info["numiter"] = numiter self.fit_info["exit_mode"] = exit_mode self.fit_info["num_function_calls"] = funcalls if self.fit_info["exit_mode"] == 1: warnings.warn( "The fit may be unsuccessful; " "Maximum number of function evaluations reached.", AstropyUserWarning, ) elif self.fit_info["exit_mode"] == 2: warnings.warn( "The fit may be unsuccessful; Maximum number of iterations reached.", AstropyUserWarning, ) return fitparams, self.fit_info
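# Sketch of how these optimizers are used in practice through the fitters
# built on top of them (``SLSQPLSQFitter`` wraps SLSQP, ``SimplexLSQFitter``
# wraps Simplex).  Requires scipy; data and values are illustrative.
import numpy as np
from astropy.modeling import fitting
from astropy.modeling.models import Gaussian1D

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 100)
y = 3 * np.exp(-0.5 * x**2) + rng.normal(0, 0.05, x.size)

# SLSQP supports bounds (as well as fixed/tied/eqcons/ineqcons constraints).
model = Gaussian1D(amplitude=1, mean=0.5, stddev=2, bounds={"mean": (-1, 1)})
fitter = fitting.SLSQPLSQFitter()
fit = fitter(model, x, y, verblevel=0)
# fit.mean stays within the (-1, 1) bound supplied above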
1d94ee5c25f35f736f6ba0f1e72216c40a475f072eaa8eb4fd6d2a608a99bf86
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Implements rotations, including spherical rotations as defined in WCS Paper II [1]_ `RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in WCS Paper II to rotate to/from a native sphere and the celestial sphere. The implementation uses `EulerAngleRotation`. The model parameters are three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat`` and ``-(lon_pole-90)``. References ---------- .. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II) """ # pylint: disable=invalid-name, too-many-arguments, no-member import math from functools import reduce import numpy as np from astropy import units as u from astropy.coordinates.matrix_utilities import rotation_matrix from .core import Model from .parameters import Parameter from .utils import _to_orig_unit, _to_radian __all__ = [ "RotateCelestial2Native", "RotateNative2Celestial", "Rotation2D", "EulerAngleRotation", "RotationSequence3D", "SphericalRotationSequence", ] def _create_matrix(angles, axes_order): matrices = [] for angle, axis in zip(angles, axes_order): if isinstance(angle, u.Quantity): angle = angle.value angle = angle.item() matrices.append(rotation_matrix(angle, axis, unit=u.rad)) return reduce(np.matmul, matrices[::-1]) def spherical2cartesian(alpha, delta): alpha = np.deg2rad(alpha) delta = np.deg2rad(delta) x = np.cos(alpha) * np.cos(delta) y = np.cos(delta) * np.sin(alpha) z = np.sin(delta) return np.array([x, y, z]) def cartesian2spherical(x, y, z): h = np.hypot(x, y) alpha = np.rad2deg(np.arctan2(y, x)) delta = np.rad2deg(np.arctan2(z, h)) return alpha, delta class RotationSequence3D(Model): """ Perform a series of rotations about different axis in 3D space. Positive angles represent a counter-clockwise rotation. Parameters ---------- angles : array-like Angles of rotation in deg in the order of axes_order. axes_order : str A sequence of 'x', 'y', 'z' corresponding to axis of rotation. Examples -------- >>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx') """ standard_broadcasting = False _separable = False n_inputs = 3 n_outputs = 3 angles = Parameter( default=[], getter=_to_orig_unit, setter=_to_radian, description="Angles of rotation in deg in the order of axes_order", ) def __init__(self, angles, axes_order, name=None): self.axes = ["x", "y", "z"] unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError( f"Unrecognized axis label {unrecognized}; should be one of {self.axes} " ) self.axes_order = axes_order if len(angles) != len(axes_order): raise ValueError( f"The number of angles {len(angles)} should match " f"the number of axes {len(axes_order)}." ) super().__init__(angles, name=name) self._inputs = ("x", "y", "z") self._outputs = ("x", "y", "z") @property def inverse(self): """Inverse rotation.""" angles = self.angles.value[::-1] * -1 return self.__class__(angles, axes_order=self.axes_order[::-1]) def evaluate(self, x, y, z, angles): """ Apply the rotation to a set of 3D Cartesian coordinates. 
""" if x.shape != y.shape or x.shape != z.shape: raise ValueError("Expected input arrays to have the same shape") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten(), z.flatten()]) result = np.dot(_create_matrix(angles[0], self.axes_order), inarr) x, y, z = result[0], result[1], result[2] x.shape = y.shape = z.shape = orig_shape return x, y, z class SphericalRotationSequence(RotationSequence3D): """ Perform a sequence of rotations about arbitrary number of axes in spherical coordinates. Parameters ---------- angles : list A sequence of angles (in deg). axes_order : str A sequence of characters ('x', 'y', or 'z') corresponding to the axis of rotation and matching the order in ``angles``. """ def __init__(self, angles, axes_order, name=None, **kwargs): self._n_inputs = 2 self._n_outputs = 2 super().__init__(angles, axes_order=axes_order, name=name, **kwargs) self._inputs = ("lon", "lat") self._outputs = ("lon", "lat") @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs def evaluate(self, lon, lat, angles): x, y, z = spherical2cartesian(lon, lat) x1, y1, z1 = super().evaluate(x, y, z, angles) lon, lat = cartesian2spherical(x1, y1, z1) return lon, lat class _EulerRotation: """ Base class which does the actual computation. """ _separable = False def evaluate(self, alpha, delta, phi, theta, psi, axes_order): shape = None if isinstance(alpha, np.ndarray): alpha = alpha.flatten() delta = delta.flatten() shape = alpha.shape inp = spherical2cartesian(alpha, delta) matrix = _create_matrix([phi, theta, psi], axes_order) result = np.dot(matrix, inp) a, b = cartesian2spherical(*result) if shape is not None: a.shape = shape b.shape = shape return a, b _input_units_strict = True _input_units_allow_dimensionless = True @property def input_units(self): """Input units.""" return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """Output units.""" return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} class EulerAngleRotation(_EulerRotation, Model): """ Implements Euler angle intrinsic rotations. Rotates one coordinate system into another (fixed) coordinate system. All coordinate systems are right-handed. The sign of the angles is determined by the right-hand rule.. Parameters ---------- phi, theta, psi : float or `~astropy.units.Quantity` ['angle'] "proper" Euler angles in deg. If floats, they should be in deg. axes_order : str A 3 character string, a combination of 'x', 'y' and 'z', where each character denotes an axis in 3D space. 
""" n_inputs = 2 n_outputs = 2 phi = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="1st Euler angle (Quantity or value in deg)", ) theta = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="2nd Euler angle (Quantity or value in deg)", ) psi = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="3rd Euler angle (Quantity or value in deg)", ) def __init__(self, phi, theta, psi, axes_order, **kwargs): self.axes = ["x", "y", "z"] if len(axes_order) != 3: raise TypeError( "Expected axes_order to be a character sequence of length 3, " f"got {axes_order}" ) unrecognized = set(axes_order).difference(self.axes) if unrecognized: raise ValueError( f"Unrecognized axis label {unrecognized}; should be one of {self.axes}" ) self.axes_order = axes_order qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]] if any(qs) and not all(qs): raise TypeError( "All parameters should be of the same type - float or Quantity." ) super().__init__(phi=phi, theta=theta, psi=psi, **kwargs) self._inputs = ("alpha", "delta") self._outputs = ("alpha", "delta") @property def inverse(self): return self.__class__( phi=-self.psi, theta=-self.theta, psi=-self.phi, axes_order=self.axes_order[::-1], ) def evaluate(self, alpha, delta, phi, theta, psi): a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order) return a, b class _SkyRotation(_EulerRotation, Model): """ Base class for RotateNative2Celestial and RotateCelestial2Native. """ lon = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude" ) lat = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="Longtitude" ) lon_pole = Parameter( default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude of a pole", ) def __init__(self, lon, lat, lon_pole, **kwargs): qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]] if any(qs) and not all(qs): raise TypeError( "All parameters should be of the same type - float or Quantity." ) super().__init__(lon, lat, lon_pole, **kwargs) self.axes_order = "zxz" def _evaluate(self, phi, theta, lon, lat, lon_pole): alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order) mask = alpha < 0 if isinstance(mask, np.ndarray): alpha[mask] += 360 else: alpha += 360 return alpha, delta class RotateNative2Celestial(_SkyRotation): """ Transform from Native to Celestial Spherical Coordinates. Parameters ---------- lon : float or `~astropy.units.Quantity` ['angle'] Celestial longitude of the fiducial point. lat : float or `~astropy.units.Quantity` ['angle'] Celestial latitude of the fiducial point. lon_pole : float or `~astropy.units.Quantity` ['angle'] Longitude of the celestial pole in the native system. Notes ----- If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg. Inputs are angles on the native sphere. Outputs are angles on the celestial sphere. 
""" n_inputs = 2 n_outputs = 2 @property def input_units(self): """Input units.""" return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """Output units.""" return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def __init__(self, lon, lat, lon_pole, **kwargs): super().__init__(lon, lat, lon_pole, **kwargs) self.inputs = ("phi_N", "theta_N") self.outputs = ("alpha_C", "delta_C") def evaluate(self, phi_N, theta_N, lon, lat, lon_pole): """ Parameters ---------- phi_N, theta_N : float or `~astropy.units.Quantity` ['angle'] Angles in the Native coordinate system. it is assumed that numerical only inputs are in degrees. If float, assumed in degrees. lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle'] Parameter values when the model was initialized. If float, assumed in degrees. Returns ------- alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle'] Angles on the Celestial sphere. If float, in degrees. """ # The values are in radians since they have already been through the setter. if isinstance(lon, u.Quantity): lon = lon.value lat = lat.value lon_pole = lon_pole.value # Convert to Euler angles phi = lon_pole - np.pi / 2 theta = -(np.pi / 2 - lat) psi = -(np.pi / 2 + lon) alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi) return alpha_C, delta_C @property def inverse(self): # convert to angles on the celestial sphere return RotateCelestial2Native(self.lon, self.lat, self.lon_pole) class RotateCelestial2Native(_SkyRotation): """ Transform from Celestial to Native Spherical Coordinates. Parameters ---------- lon : float or `~astropy.units.Quantity` ['angle'] Celestial longitude of the fiducial point. lat : float or `~astropy.units.Quantity` ['angle'] Celestial latitude of the fiducial point. lon_pole : float or `~astropy.units.Quantity` ['angle'] Longitude of the celestial pole in the native system. Notes ----- If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg. Inputs are angles on the celestial sphere. Outputs are angles on the native sphere. """ n_inputs = 2 n_outputs = 2 @property def input_units(self): """Input units.""" return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): """Output units.""" return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def __init__(self, lon, lat, lon_pole, **kwargs): super().__init__(lon, lat, lon_pole, **kwargs) # Inputs are angles on the celestial sphere self.inputs = ("alpha_C", "delta_C") # Outputs are angles on the native sphere self.outputs = ("phi_N", "theta_N") def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole): """ Parameters ---------- alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle'] Angles in the Celestial coordinate frame. If float, assumed in degrees. lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle'] Parameter values when the model was initialized. If float, assumed in degrees. Returns ------- phi_N, theta_N : float or `~astropy.units.Quantity` ['angle'] Angles on the Native sphere. If float, in degrees. """ if isinstance(lon, u.Quantity): lon = lon.value lat = lat.value lon_pole = lon_pole.value # Convert to Euler angles phi = np.pi / 2 + lon theta = np.pi / 2 - lat psi = -(lon_pole - np.pi / 2) phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi) return phi_N, theta_N @property def inverse(self): return RotateNative2Celestial(self.lon, self.lat, self.lon_pole) class Rotation2D(Model): """ Perform a 2D rotation given an angle. 
Positive angles represent a counter-clockwise rotation and vice-versa. Parameters ---------- angle : float or `~astropy.units.Quantity` ['angle'] Angle of rotation (if float it should be in deg). """ n_inputs = 2 n_outputs = 2 _separable = False angle = Parameter( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Angle of rotation (Quantity or value in deg)", ) def __init__(self, angle=angle, **kwargs): super().__init__(angle=angle, **kwargs) self._inputs = ("x", "y") self._outputs = ("x", "y") @property def inverse(self): """Inverse rotation.""" return self.__class__(angle=-self.angle) @classmethod def evaluate(cls, x, y, angle): """ Rotate (x, y) about ``angle``. Parameters ---------- x, y : array-like Input quantities angle : float or `~astropy.units.Quantity` ['angle'] Angle of rotations. If float, assumed in degrees. """ if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") # If one argument has units, enforce they both have units and they are compatible. x_unit = getattr(x, "unit", None) y_unit = getattr(y, "unit", None) has_units = x_unit is not None and y_unit is not None if x_unit != y_unit: if has_units and y_unit.is_equivalent(x_unit): y = y.to(x_unit) y_unit = x_unit else: raise u.UnitsError("x and y must have compatible units") # Note: If the original shape was () (an array scalar) convert to a # 1-element 1-D array on output for consistency with most other models orig_shape = x.shape or (1,) inarr = np.array([x.flatten(), y.flatten()]) if isinstance(angle, u.Quantity): angle = angle.to_value(u.rad) result = np.dot(cls._compute_matrix(angle), inarr) x, y = result[0], result[1] x.shape = y.shape = orig_shape if has_units: return u.Quantity(x, unit=x_unit, subok=True), u.Quantity( y, unit=y_unit, subok=True ) return x, y @staticmethod def _compute_matrix(angle): return np.array( [[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], dtype=np.float64, )
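# Small sketch of the rotation models defined above; the numbers are
# round-trip checks rather than authoritative results.
from astropy.modeling.models import Rotation2D, RotateNative2Celestial

rot = Rotation2D(angle=90.0)
rot(1.0, 0.0)                 # counter-clockwise by 90 deg -> approximately (0, 1)

n2c = RotateNative2Celestial(lon=5.6, lat=-72.05, lon_pole=180)
n2c(0.0, 90.0)                # native pole maps to the fiducial point (5.6, -72.05)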
b0f949c3badf7fb1c22720148c7583735f8effc58fb3a8d90c28601dad36e094
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Define Numpy Ufuncs as Models. """ import numpy as np from astropy.modeling.core import Model trig_ufuncs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2", "hypot", "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", "deg2rad", "rad2deg", ] math_ops = [ "add", "subtract", "multiply", "logaddexp", "logaddexp2", "true_divide", "floor_divide", "negative", "positive", "power", "remainder", "fmod", "divmod", "absolute", "fabs", "rint", "exp", "exp2", "log", "log2", "log10", "expm1", "log1p", "sqrt", "square", "cbrt", "reciprocal", "divide", "mod", ] supported_ufuncs = trig_ufuncs + math_ops # These names are just aliases for other ufunc objects # in the numpy API. The alias name must occur later # in the lists above. alias_ufuncs = { "divide": "true_divide", "mod": "remainder", } class _NPUfuncModel(Model): _is_dynamic = True def __init__(self, **kwargs): super().__init__(**kwargs) def _make_class_name(name): """Make a ufunc model class name from the name of the ufunc.""" return name[0].upper() + name[1:] + "Ufunc" def ufunc_model(name): """Define a Model from a Numpy ufunc name.""" ufunc = getattr(np, name) nin = ufunc.nin nout = ufunc.nout if nin == 1: separable = True def evaluate(self, x): return self.func(x) else: separable = False def evaluate(self, x, y): return self.func(x, y) klass_name = _make_class_name(name) members = { "n_inputs": nin, "n_outputs": nout, "func": ufunc, "linear": False, "fittable": False, "_separable": separable, "_is_dynamic": True, "evaluate": evaluate, } klass = type(str(klass_name), (_NPUfuncModel,), members) klass.__module__ = "astropy.modeling.math_functions" return klass __all__ = [] for name in supported_ufuncs: if name in alias_ufuncs: klass_name = _make_class_name(name) alias_klass_name = _make_class_name(alias_ufuncs[name]) globals()[klass_name] = globals()[alias_klass_name] __all__.append(klass_name) else: m = ufunc_model(name) klass_name = m.__name__ globals()[klass_name] = m __all__.append(klass_name)
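# Sketch of using the dynamically generated ufunc models above.  Class names
# follow the ``<Name>Ufunc`` pattern; they are also reachable through
# ``astropy.modeling.models.math``.
import numpy as np
from astropy.modeling import math_functions
from astropy.modeling.models import Shift

sqrt = math_functions.SqrtUfunc()        # one-input ufunc model (separable)
add = math_functions.AddUfunc()          # two-input ufunc model (not separable)

sqrt(np.array([1.0, 4.0, 9.0]))          # -> [1., 2., 3.]
add(2.0, 3.0)                            # -> 5.0

# Ufunc models compose with other models like any other Model:
pipeline = Shift(1) | math_functions.Log10Ufunc()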
9d58e68473b21e9c863fb656554d1418ce96d08f43f3d2e61854c0ad74c43702
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions to determine if a model is separable, i.e. if the model outputs are independent. It analyzes ``n_inputs``, ``n_outputs`` and the operators in a compound model by stepping through the transforms and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``). Each modeling operator is represented by a function which takes two simple models (or two ``coord_matrix`` arrays) and returns an array of shape (``n_outputs``, ``n_inputs``). """ import numpy as np from .core import CompoundModel, Model, ModelDefinitionError from .mappings import Mapping __all__ = ["is_separable", "separability_matrix"] def is_separable(transform): """ A separability test for the outputs of a transform. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- is_separable : ndarray A boolean array with size ``transform.n_outputs`` where each element indicates whether the output is independent and the result of a separable transform. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([ True, True]...) >>> is_separable(Shift(1) & Shift(2) | Rotation2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([ True, True, True, True]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: is_separable = np.array([False] * transform.n_outputs).T return is_separable separable_matrix = _separable(transform) is_separable = separable_matrix.sum(1) is_separable = np.where(is_separable != 1, False, True) return is_separable def separability_matrix(transform): """ Compute the correlation between outputs and inputs. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- separable_matrix : ndarray A boolean correlation matrix of shape (n_outputs, n_inputs). Indicates the dependence of outputs on inputs. For completely independent outputs, the diagonal elements are True and off-diagonal elements are False. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([[ True, False], [False, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([[ True, False], [False, True], [ True, False], [False, True]]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_) separable_matrix = _separable(transform) separable_matrix = np.where(separable_matrix != 0, True, False) return separable_matrix def _compute_n_outputs(left, right): """ Compute the number of outputs of two models. The two models are the left and right model to an operation in the expression tree of a compound model. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. 
""" if isinstance(left, Model): lnout = left.n_outputs else: lnout = left.shape[0] if isinstance(right, Model): rnout = right.n_outputs else: rnout = right.shape[0] noutp = lnout + rnout return noutp def _arith_oper(left, right): """ Function corresponding to one of the arithmetic operators ['+', '-'. '*', '/', '**']. This always returns a nonseparable output. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ # models have the same number of inputs and outputs def _n_inputs_outputs(input): if isinstance(input, Model): n_outputs, n_inputs = input.n_outputs, input.n_inputs else: n_outputs, n_inputs = input.shape return n_inputs, n_outputs left_inputs, left_outputs = _n_inputs_outputs(left) right_inputs, right_outputs = _n_inputs_outputs(right) if left_inputs != right_inputs or left_outputs != right_outputs: raise ModelDefinitionError( "Unsupported operands for arithmetic operator: left" f" (n_inputs={left_inputs}, n_outputs={left_outputs}) and right" f" (n_inputs={right_inputs}, n_outputs={right_outputs}); models must have" " the same n_inputs and the same n_outputs for this operator." ) result = np.ones((left_outputs, left_inputs)) return result def _coord_matrix(model, pos, noutp): """ Create an array representing inputs and outputs of a simple model. The array has a shape (noutp, model.n_inputs). Parameters ---------- model : `astropy.modeling.Model` model pos : str Position of this model in the expression tree. One of ['left', 'right']. noutp : int Number of outputs of the compound model of which the input model is a left or right child. """ if isinstance(model, Mapping): axes = [] for i in model.mapping: axis = np.zeros((model.n_inputs,)) axis[i] = 1 axes.append(axis) m = np.vstack(axes) mat = np.zeros((noutp, model.n_inputs)) if pos == "left": mat[: model.n_outputs, : model.n_inputs] = m else: mat[-model.n_outputs :, -model.n_inputs :] = m return mat if not model.separable: # this does not work for more than 2 coordinates mat = np.zeros((noutp, model.n_inputs)) if pos == "left": mat[: model.n_outputs, : model.n_inputs] = 1 else: mat[-model.n_outputs :, -model.n_inputs :] = 1 else: mat = np.zeros((noutp, model.n_inputs)) for i in range(model.n_inputs): mat[i, i] = 1 if pos == "right": mat = np.roll(mat, (noutp - model.n_outputs)) return mat def _cstack(left, right): """ Function corresponding to '&' operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ noutp = _compute_n_outputs(left, right) if isinstance(left, Model): cleft = _coord_matrix(left, "left", noutp) else: cleft = np.zeros((noutp, left.shape[1])) cleft[: left.shape[0], : left.shape[1]] = left if isinstance(right, Model): cright = _coord_matrix(right, "right", noutp) else: cright = np.zeros((noutp, right.shape[1])) cright[-right.shape[0] :, -right.shape[1] :] = right return np.hstack([cleft, cright]) def _cdot(left, right): """ Function corresponding to "|" operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ left, right = right, left def _n_inputs_outputs(input, position): """ Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix. 
""" if isinstance(input, Model): coords = _coord_matrix(input, position, input.n_outputs) else: coords = input return coords cleft = _n_inputs_outputs(left, "left") cright = _n_inputs_outputs(right, "right") try: result = np.dot(cleft, cright) except ValueError: raise ModelDefinitionError( 'Models cannot be combined with the "|" operator; ' f"left coord_matrix is {cright}, right coord_matrix is {cleft}" ) return result def _separable(transform): """ Calculate the separability of outputs. Parameters ---------- transform : `astropy.modeling.Model` A transform (usually a compound model). Returns : is_separable : ndarray of dtype np.bool An array of shape (transform.n_outputs,) of boolean type Each element represents the separablity of the corresponding output. """ if ( transform_matrix := transform._calculate_separability_matrix() ) is not NotImplemented: return transform_matrix elif isinstance(transform, CompoundModel): sepleft = _separable(transform.left) sepright = _separable(transform.right) return _operators[transform.op](sepleft, sepright) elif isinstance(transform, Model): return _coord_matrix(transform, "left", transform.n_outputs) # Maps modeling operators to a function computing and represents the # relationship of axes as an array of 0-es and 1-s _operators = { "&": _cstack, "|": _cdot, "+": _arith_oper, "-": _arith_oper, "*": _arith_oper, "/": _arith_oper, "**": _arith_oper, }
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides utility functions for the models package. """ import warnings # pylint: disable=invalid-name from collections import UserDict from collections.abc import MutableMapping from inspect import signature import numpy as np from astropy import units as u from astropy.utils.decorators import deprecated __doctest_skip__ = ["AliasDict"] __all__ = ["AliasDict", "poly_map_domain", "comb", "ellipse_extent"] deprecation_msg = """ AliasDict is deprecated because it no longer serves a function anywhere inside astropy. """ @deprecated("5.0", deprecation_msg) class AliasDict(MutableMapping): """ Creates a `dict` like object that wraps an existing `dict` or other `MutableMapping`, along with a `dict` of *key aliases* that translate between specific keys in this dict to different keys in the underlying dict. In other words, keys that do not have an associated alias are accessed and stored like a normal `dict`. However, a key that has an alias is accessed and stored to the "parent" dict via the alias. Parameters ---------- parent : dict-like The parent `dict` that aliased keys and accessed from and stored to. aliases : dict-like Maps keys in this dict to their associated keys in the parent dict. Examples -------- >>> parent = {'a': 1, 'b': 2, 'c': 3} >>> aliases = {'foo': 'a', 'bar': 'c'} >>> alias_dict = AliasDict(parent, aliases) >>> alias_dict['foo'] 1 >>> alias_dict['bar'] 3 Keys in the original parent dict are not visible if they were not aliased: >>> alias_dict['b'] Traceback (most recent call last): ... KeyError: 'b' Likewise, updates to aliased keys are reflected back in the parent dict: >>> alias_dict['foo'] = 42 >>> alias_dict['foo'] 42 >>> parent['a'] 42 However, updates/insertions to keys that are *not* aliased are not reflected in the parent dict: >>> alias_dict['qux'] = 99 >>> alias_dict['qux'] 99 >>> 'qux' in parent False In particular, updates on the `AliasDict` to a key that is equal to one of the aliased keys in the parent dict does *not* update the parent dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But assigning to a key ``'a'`` on the `AliasDict` does not impact the parent: >>> alias_dict['a'] = 'nope' >>> alias_dict['a'] 'nope' >>> parent['a'] 42 """ _store_type = dict """ Subclasses may override this to use other mapping types as the underlying storage, for example an `OrderedDict`. However, even in this case additional work may be needed to get things like the ordering right. """ def __init__(self, parent, aliases): self._parent = parent self._store = self._store_type() self._aliases = dict(aliases) def __getitem__(self, key): if key in self._aliases: try: return self._parent[self._aliases[key]] except KeyError: raise KeyError(key) return self._store[key] def __setitem__(self, key, value): if key in self._aliases: self._parent[self._aliases[key]] = value else: self._store[key] = value def __delitem__(self, key): if key in self._aliases: try: del self._parent[self._aliases[key]] except KeyError: raise KeyError(key) else: del self._store[key] def __iter__(self): """ First iterates over keys from the parent dict (if the aliased keys are present in the parent), followed by any keys in the local store. 
""" for key, alias in self._aliases.items(): if alias in self._parent: yield key for key in self._store: yield key def __len__(self): return len(list(iter(self))) def __repr__(self): # repr() just like any other dict--this should look transparent store_copy = self._store_type() for key, alias in self._aliases.items(): if alias in self._parent: store_copy[key] = self._parent[alias] store_copy.update(self._store) return repr(store_copy) def make_binary_operator_eval(oper, f, g): """ Given a binary operator (as a callable of two arguments) ``oper`` and two callables ``f`` and ``g`` which accept the same arguments, returns a *new* function that takes the same arguments as ``f`` and ``g``, but passes the outputs of ``f`` and ``g`` in the given ``oper``. ``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The given operator is applied element-wise to tuple outputs). Example ------- >>> from operator import add >>> def prod(x, y): ... return (x * y,) ... >>> sum_of_prod = make_binary_operator_eval(add, prod, prod) >>> sum_of_prod(3, 5) (30,) """ return lambda inputs, params: tuple( oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params)) ) def poly_map_domain(oldx, domain, window): """ Map domain into window by shifting and scaling. Parameters ---------- oldx : array original coordinates domain : list or tuple of length 2 function domain window : list or tuple of length 2 range into which to map the domain """ domain = np.array(domain, dtype=np.float64) window = np.array(window, dtype=np.float64) if domain.shape != (2,) or window.shape != (2,): raise ValueError('Expected "domain" and "window" to be a tuple of size 2.') scl = (window[1] - window[0]) / (domain[1] - domain[0]) off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0]) return off + scl * oldx def _validate_domain_window(value): if value is not None: if np.asanyarray(value).shape != (2,): raise ValueError("domain and window should be tuples of size 2.") return tuple(value) return value @deprecated("5.3", alternative="math.comb") def comb(N, k): """ The number of combinations of N things taken k at a time. Parameters ---------- N : int, array Number of things. k : int, array Number of elements taken. """ if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for j in range(min(k, N - k)): val = (val * (N - j)) / (j + 1) return val def array_repr_oneline(array): """ Represents a multi-dimensional Numpy array flattened onto a single line. """ r = np.array2string(array, separator=", ", suppress_small=True) return " ".join(line.strip() for line in r.splitlines()) def combine_labels(left, right): """ For use with the join operator &: Combine left input/output labels with right input/output labels. If none of the labels conflict then this just returns a sum of tuples. However if *any* of the labels conflict, this appends '0' to the left-hand labels and '1' to the right-hand labels so there is no ambiguity). """ if set(left).intersection(right): left = tuple(label + "0" for label in left) right = tuple(label + "1" for label in right) return left + right def ellipse_extent(a, b, theta): """ Calculates the half size of a box encapsulating a rotated 2D ellipse. Parameters ---------- a : float or `~astropy.units.Quantity` The ellipse semimajor axis. b : float or `~astropy.units.Quantity` The ellipse semiminor axis. theta : float or `~astropy.units.Quantity` ['angle'] The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). 
The rotation angle increases counterclockwise. Returns ------- offsets : tuple The absolute value of the offset distances from the ellipse center that define its bounding box region, ``(dx, dy)``. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Ellipse2D from astropy.modeling.utils import ellipse_extent, render_model amplitude = 1 x0 = 50 y0 = 50 a = 30 b = 10 theta = np.pi / 4 model = Ellipse2D(amplitude, x0, y0, a, b, theta) dx, dy = ellipse_extent(a, b, theta) limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy] model.bounding_box = limits image = render_model(model) plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5, extent = limits) plt.show() """ from .parameters import Parameter # prevent circular import if isinstance(theta, Parameter): if theta.quantity is None: theta = theta.value else: theta = theta.quantity t = np.arctan2(-b * np.tan(theta), a) dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta) t = np.arctan2(b, a * np.tan(theta)) dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta) if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity): return np.abs(u.Quantity([dx, dy], subok=True)) return np.abs([dx, dy]) def get_inputs_and_params(func): """ Given a callable, determine the input variables and the parameters. Parameters ---------- func : callable Returns ------- inputs, params : tuple Each entry is a list of inspect.Parameter objects """ sig = signature(func) inputs = [] params = [] for param in sig.parameters.values(): if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD): raise ValueError("Signature must not have *args or **kwargs") if param.default == param.empty: inputs.append(param) else: params.append(param) return inputs, params def _combine_equivalency_dict(keys, eq1=None, eq2=None): # Given two dictionaries that give equivalencies for a set of keys, for # example input value names, return a dictionary that includes all the # equivalencies eq = {} for key in keys: eq[key] = [] if eq1 is not None and key in eq1: eq[key].extend(eq1[key]) if eq2 is not None and key in eq2: eq[key].extend(eq2[key]) return eq def _to_radian(value): """Convert ``value`` to radian.""" if isinstance(value, u.Quantity): return value.to(u.rad) return np.deg2rad(value) def _to_orig_unit(value, raw_unit=None, orig_unit=None): """Convert value with ``raw_unit`` to ``orig_unit``.""" if raw_unit is not None: return (value * raw_unit).to(orig_unit) return np.rad2deg(value) class _ConstraintsDict(UserDict): """ Wrapper around UserDict to allow updating the constraints on a Parameter when the dictionary is updated. """ def __init__(self, model, constraint_type): self._model = model self.constraint_type = constraint_type c = {} for name in model.param_names: param = getattr(model, name) c[name] = getattr(param, constraint_type) super().__init__(c) def __setitem__(self, key, val): super().__setitem__(key, val) param = getattr(self._model, key) setattr(param, self.constraint_type, val) class _SpecialOperatorsDict(UserDict): """ Wrapper around UserDict to allow for better tracking of the Special Operators for CompoundModels. This dictionary is structured so that one cannot inadvertently overwrite an existing special operator. 
Parameters ---------- unique_id: int the last used unique_id for a SPECIAL OPERATOR special_operators: dict a dictionary containing the special_operators Notes ----- Direct setting of operators (`dict[key] = value`) into the dictionary has been deprecated in favor of the `.add(name, value)` method, so that unique dictionary keys can be generated and tracked consistently. """ def __init__(self, unique_id=0, special_operators={}): super().__init__(special_operators) self._unique_id = unique_id def _set_value(self, key, val): if key in self: raise ValueError(f'Special operator "{key}" already exists') else: super().__setitem__(key, val) def __setitem__(self, key, val): self._set_value(key, val) warnings.warn( DeprecationWarning( """ Special operator dictionary assignment has been deprecated. Please use `.add` instead, so that you can capture a unique key for your operator. """ ) ) def _get_unique_id(self): self._unique_id += 1 return self._unique_id def add(self, operator_name, operator): """ Adds a special operator to the dictionary, and then returns the unique key that the operator is stored under for later reference. Parameters ---------- operator_name: str the name for the operator operator: function the actual operator function which will be used Returns ------- the unique operator key for the dictionary `(operator_name, unique_id)` """ key = (operator_name, self._get_unique_id()) self._set_value(key, operator) return key
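# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): two of the helpers
# defined above.  ``poly_map_domain`` applies the affine map built from the
# scale/offset formulas, and ``ellipse_extent`` returns the half-sizes of the
# bounding box of a rotated ellipse.  The input numbers are arbitrary.
if __name__ == "__main__":
    # Map the domain [0, 10] onto the window [-1, 1]:
    # scale = 2/10 = 0.2 and offset = -1, so 0 -> -1, 5 -> 0 and 10 -> 1.
    print(poly_map_domain(np.array([0.0, 5.0, 10.0]), [0, 10], [-1, 1]))

    # For an unrotated ellipse with semi-axes a=2 and b=1 the half-box is
    # simply (2, 1); with a non-zero rotation angle the two extents mix.
    print(ellipse_extent(2.0, 1.0, 0.0))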
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module is to contain an improved bounding box """ import abc import copy import warnings from collections import namedtuple from typing import Any, Callable, Dict, List, Tuple import numpy as np from astropy.units import Quantity from astropy.utils import isiterable __all__ = ["ModelBoundingBox", "CompoundBoundingBox"] _BaseInterval = namedtuple("_BaseInterval", "lower upper") class _Interval(_BaseInterval): """ A single input's bounding box interval. Parameters ---------- lower : float The lower bound of the interval upper : float The upper bound of the interval Methods ------- validate : Contructs a valid interval outside : Determine which parts of an input array are outside the interval. domain : Contructs a discretization of the points inside the interval. """ def __repr__(self): return f"Interval(lower={self.lower}, upper={self.upper})" def copy(self): return copy.deepcopy(self) @staticmethod def _validate_shape(interval): """Validate the shape of an interval representation""" MESSAGE = """An interval must be some sort of sequence of length 2""" try: shape = np.shape(interval) except TypeError: try: # np.shape does not work with lists of Quantities if len(interval) == 1: interval = interval[0] shape = np.shape([b.to_value() for b in interval]) except (ValueError, TypeError, AttributeError): raise ValueError(MESSAGE) valid_shape = shape in ((2,), (1, 2), (2, 0)) if not valid_shape: valid_shape = ( len(shape) > 0 and shape[0] == 2 and all(isinstance(b, np.ndarray) for b in interval) ) if not isiterable(interval) or not valid_shape: raise ValueError(MESSAGE) @classmethod def _validate_bounds(cls, lower, upper): """Validate the bounds are reasonable and construct an interval from them.""" if (np.asanyarray(lower) > np.asanyarray(upper)).all(): warnings.warn( f"Invalid interval: upper bound {upper} " f"is strictly less than lower bound {lower}.", RuntimeWarning, ) return cls(lower, upper) @classmethod def validate(cls, interval): """ Construct and validate an interval Parameters ---------- interval : iterable A representation of the interval. Returns ------- A validated interval. """ cls._validate_shape(interval) if len(interval) == 1: interval = tuple(interval[0]) else: interval = tuple(interval) return cls._validate_bounds(interval[0], interval[1]) def outside(self, _input: np.ndarray): """ Parameters ---------- _input : np.ndarray The evaluation input in the form of an array. Returns ------- Boolean array indicating which parts of _input are outside the interval: True -> position outside interval False -> position inside interval """ return np.logical_or(_input < self.lower, _input > self.upper) def domain(self, resolution): return np.arange(self.lower, self.upper + resolution, resolution) # The interval where all ignored inputs can be found. _ignored_interval = _Interval.validate((-np.inf, np.inf)) def get_index(model, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself. """ if isinstance(key, str): if key in model.inputs: index = model.inputs.index(key) else: raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.") elif np.issubdtype(type(key), np.integer): if 0 <= key < len(model.inputs): index = key else: raise IndexError( f"Integer key: {key} must be non-negative and < {len(model.inputs)}." 
) else: raise ValueError(f"Key value: {key} must be string or integer.") return index def get_name(model, index: int): """Get the input name corresponding to the input index""" return model.inputs[index] class _BoundingDomain(abc.ABC): """ Base class for ModelBoundingBox and CompoundBoundingBox. This is where all the `~astropy.modeling.core.Model` evaluation code for evaluating with a bounding box is because it is common to both types of bounding box. Parameters ---------- model : `~astropy.modeling.Model` The Model this bounding domain is for. prepare_inputs : Generates the necessary input information so that model can be evaluated only for input points entirely inside bounding_box. This needs to be implemented by a subclass. Note that most of the implementation is in ModelBoundingBox. prepare_outputs : Fills the output values in for any input points outside the bounding_box. evaluate : Performs a complete model evaluation while enforcing the bounds on the inputs and returns a complete output. """ def __init__(self, model, ignored: List[int] = None, order: str = "C"): self._model = model self._ignored = self._validate_ignored(ignored) self._order = self._get_order(order) @property def model(self): return self._model @property def order(self) -> str: return self._order @property def ignored(self) -> List[int]: return self._ignored def _get_order(self, order: str = None) -> str: """ Get if bounding_box is C/python ordered or Fortran/mathematically ordered """ if order is None: order = self._order if order not in ("C", "F"): raise ValueError( "order must be either 'C' (C/python order) or " f"'F' (Fortran/mathematical order), got: {order}." ) return order def _get_index(self, key) -> int: """ Get the input index corresponding to the given key. Can pass in either: the string name of the input or the input index itself. """ return get_index(self._model, key) def _get_name(self, index: int): """Get the input name corresponding to the input index""" return get_name(self._model, index) @property def ignored_inputs(self) -> List[str]: return [self._get_name(index) for index in self._ignored] def _validate_ignored(self, ignored: list) -> List[int]: if ignored is None: return [] else: return [self._get_index(key) for key in ignored] def __call__(self, *args, **kwargs): raise NotImplementedError( "This bounding box is fixed by the model and does not have " "adjustable parameters." ) @abc.abstractmethod def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ raise NotImplementedError("This should be implemented by a child class.") @abc.abstractmethod def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. 
Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ raise NotImplementedError("This has not been implemented for BoundingDomain.") @staticmethod def _base_output(input_shape, fill_value): """ Create a baseline output, assuming that the entire input is outside the bounding box Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An array of the correct shape containing all fill_value """ return np.zeros(input_shape) + fill_value def _all_out_output(self, input_shape, fill_value): """ Create output if all inputs are outside the domain Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- A full set of outputs for case that all inputs are outside domain. """ return [ self._base_output(input_shape, fill_value) for _ in range(self._model.n_outputs) ], None def _modify_output(self, valid_output, valid_index, input_shape, fill_value): """ For a single output fill in all the parts corresponding to inputs outside the bounding box. Parameters ---------- valid_output : numpy array The output from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- An output array with all the indices corresponding to inputs outside the bounding box filled in by fill_value """ output = self._base_output(input_shape, fill_value) if not output.shape: output = np.array(valid_output) else: output[valid_index] = valid_output if np.isscalar(valid_output): output = output.item(0) return output def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box valid_index : numpy array array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box Returns ------- List of filled in output arrays. """ outputs = [] for valid_output in valid_outputs: outputs.append( self._modify_output(valid_output, valid_index, input_shape, fill_value) ) return outputs def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value): """ Fill in all the outputs of the model corresponding to inputs outside the bounding_box, adjusting any single output model so that its output becomes a list of containing that output. 
Parameters ---------- valid_outputs : list The list of outputs from the model corresponding to inputs inside the bounding box valid_index : array_like array of all indices of inputs inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box """ if self._model.n_outputs == 1: valid_outputs = [valid_outputs] return self._prepare_outputs( valid_outputs, valid_index, input_shape, fill_value ) @staticmethod def _get_valid_outputs_unit(valid_outputs, with_units: bool): """ Get the unit for outputs if one is required. Parameters ---------- valid_outputs : list of numpy array The list of outputs from the model corresponding to inputs inside the bounding box with_units : bool whether or not a unit is required """ if with_units: return getattr(valid_outputs, "unit", None) def _evaluate_model( self, evaluate: Callable, valid_inputs, valid_index, input_shape, fill_value, with_units: bool, ): """ Evaluate the model using the given evaluate routine Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model valid_inputs : list of numpy arrays The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : numpy array array of all indices inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_outputs = evaluate(valid_inputs) valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units) return ( self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value), valid_outputs_unit, ) def _evaluate( self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool ): """ Perform model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs Parameters ---------- evaluate : Callable callable which takes in the valid inputs to evaluate model valid_inputs : list of numpy arrays The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : numpy array array of all indices inside the bounding box input_shape : tuple The shape that all inputs have be reshaped/broadcasted into fill_value : float The value which will be assigned to inputs which are outside the bounding box with_units : bool whether or not a unit is required Returns ------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs """ valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs) if all_out: return self._all_out_output(input_shape, fill_value) else: return self._evaluate_model( evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units ) @staticmethod def _set_outputs_unit(outputs, valid_outputs_unit): """ Set the units on the outputs prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- outputs : list containing filled in output values valid_outputs_unit : the unit that will be attached to the outputs Returns ------- List containing filled in output values and units """ if valid_outputs_unit is not None: return Quantity(outputs, valid_outputs_unit, 
copy=False, subok=True) return outputs def evaluate(self, evaluate: Callable, inputs, fill_value): """ Perform full model evaluation steps: prepare_inputs -> evaluate -> prepare_outputs -> set output units Parameters ---------- evaluate : callable callable which takes in the valid inputs to evaluate model valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box fill_value : float The value which will be assigned to inputs which are outside the bounding box """ input_shape = self._model.input_shape(inputs) # NOTE: CompoundModel does not currently support units during # evaluation for bounding_box so this feature is turned off # for CompoundModel(s). outputs, valid_outputs_unit = self._evaluate( evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units ) return tuple(self._set_outputs_unit(outputs, valid_outputs_unit)) class ModelBoundingBox(_BoundingDomain): """ A model's bounding box Parameters ---------- intervals : dict A dictionary containing all the intervals for each model input keys -> input index values -> interval for that index model : `~astropy.modeling.Model` The Model this bounding_box is for. ignored : list A list containing all the inputs (index) which will not be checked for whether or not their elements are in/out of an interval. order : optional, str The ordering that is assumed for the tuple representation of this bounding_box. Options: 'C': C/Python order, e.g. z, y, x. (default), 'F': Fortran/mathematical notation order, e.g. x, y, z. """ def __init__( self, intervals: Dict[int, _Interval], model, ignored: List[int] = None, order: str = "C", ): super().__init__(model, ignored, order) self._intervals = {} if intervals != () and intervals != {}: self._validate(intervals, order=order) def copy(self, ignored=None): intervals = { index: interval.copy() for index, interval in self._intervals.items() } if ignored is None: ignored = self._ignored.copy() return ModelBoundingBox( intervals, self._model, ignored=ignored, order=self._order ) @property def intervals(self) -> Dict[int, _Interval]: """Return bounding_box labeled using input positions""" return self._intervals @property def named_intervals(self) -> Dict[str, _Interval]: """Return bounding_box labeled using input names""" return {self._get_name(index): bbox for index, bbox in self._intervals.items()} def __repr__(self): parts = ["ModelBoundingBox(", " intervals={"] for name, interval in self.named_intervals.items(): parts.append(f" {name}: {interval}") parts.append(" }") if len(self._ignored) > 0: parts.append(f" ignored={self.ignored_inputs}") parts.append( f" model={self._model.__class__.__name__}(inputs={self._model.inputs})" ) parts.append(f" order='{self._order}'") parts.append(")") return "\n".join(parts) def __len__(self): return len(self._intervals) def __contains__(self, key): try: return self._get_index(key) in self._intervals or self._ignored except (IndexError, ValueError): return False def has_interval(self, key): return self._get_index(key) in self._intervals def __getitem__(self, key): """Get bounding_box entries by either input name or input index""" index = self._get_index(key) if index in self._ignored: return _ignored_interval else: return self._intervals[self._get_index(key)] def bounding_box(self, order: str = None): """ Return the old tuple of tuples representation of the bounding_box order='C' corresponds to the old bounding_box ordering order='F' 
corresponds to the gwcs bounding_box ordering. """ if len(self._intervals) == 1: return tuple(list(self._intervals.values())[0]) else: order = self._get_order(order) inputs = self._model.inputs if order == "C": inputs = inputs[::-1] bbox = tuple(tuple(self[input_name]) for input_name in inputs) if len(bbox) == 1: bbox = bbox[0] return bbox def __eq__(self, value): """Note equality can be either with old representation or new one.""" if isinstance(value, tuple): return self.bounding_box() == value elif isinstance(value, ModelBoundingBox): return (self.intervals == value.intervals) and ( self.ignored == value.ignored ) else: return False def __setitem__(self, key, value): """Validate and store interval under key (input index or input name).""" index = self._get_index(key) if index in self._ignored: self._ignored.remove(index) self._intervals[index] = _Interval.validate(value) def __delitem__(self, key): """Delete stored interval""" index = self._get_index(key) if index in self._ignored: raise RuntimeError(f"Cannot delete ignored input: {key}!") del self._intervals[index] self._ignored.append(index) def _validate_dict(self, bounding_box: dict): """Validate passing dictionary of intervals and setting them.""" for key, value in bounding_box.items(): self[key] = value @property def _available_input_index(self): model_input_index = [self._get_index(_input) for _input in self._model.inputs] return [_input for _input in model_input_index if _input not in self._ignored] def _validate_sequence(self, bounding_box, order: str = None): """ Validate passing tuple of tuples representation (or related) and setting them. """ order = self._get_order(order) if order == "C": # If bounding_box is C/python ordered, it needs to be reversed # to be in Fortran/mathematical/input order. bounding_box = bounding_box[::-1] for index, value in enumerate(bounding_box): self[self._available_input_index[index]] = value @property def _n_inputs(self) -> int: n_inputs = self._model.n_inputs - len(self._ignored) if n_inputs > 0: return n_inputs else: return 0 def _validate_iterable(self, bounding_box, order: str = None): """Validate and set any iterable representation""" if len(bounding_box) != self._n_inputs: raise ValueError( f"Found {len(bounding_box)} intervals, " f"but must have exactly {self._n_inputs}." ) if isinstance(bounding_box, dict): self._validate_dict(bounding_box) else: self._validate_sequence(bounding_box, order) def _validate(self, bounding_box, order: str = None): """Validate and set any representation""" if self._n_inputs == 1 and not isinstance(bounding_box, dict): self[self._available_input_index[0]] = bounding_box else: self._validate_iterable(bounding_box, order) @classmethod def validate( cls, model, bounding_box, ignored: list = None, order: str = "C", _preserve_ignore: bool = False, **kwargs, ): """ Construct a valid bounding box for a model. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict, tuple A possible representation of the bounding box order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, ModelBoundingBox): order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.named_intervals new = cls({}, model, ignored=ignored, order=order) new._validate(bounding_box) return new def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False): """ Fix the bounding_box for a `fix_inputs` compound model. 
Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. keep_ignored : bool Keep the ignored inputs of the bounding box (internal argument only) """ new = self.copy() for _input in fixed_inputs.keys(): del new[_input] if _keep_ignored: ignored = new.ignored else: ignored = None return ModelBoundingBox.validate( model, new.named_intervals, ignored=ignored, order=new._order ) @property def dimension(self): return len(self) def domain(self, resolution, order: str = None): inputs = self._model.inputs order = self._get_order(order) if order == "C": inputs = inputs[::-1] return [self[input_name].domain(resolution) for input_name in inputs] def _outside(self, input_shape, inputs): """ Get all the input positions which are outside the bounding_box, so that the corresponding outputs can be filled with the fill value (default NaN). Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- outside_index : bool-numpy array True -> position outside bounding_box False -> position inside bounding_box all_out : bool if all of the inputs are outside the bounding_box """ all_out = False outside_index = np.zeros(input_shape, dtype=bool) for index, _input in enumerate(inputs): _input = np.asanyarray(_input) outside = np.broadcast_to(self[index].outside(_input), input_shape) outside_index[outside] = True if outside_index.all(): all_out = True break return outside_index, all_out def _valid_index(self, input_shape, inputs): """ Get the indices of all the inputs inside the bounding_box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_index : numpy array array of all indices inside the bounding box all_out : bool if all of the inputs are outside the bounding_box """ outside_index, all_out = self._outside(input_shape, inputs) valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero() if len(valid_index[0]) == 0: all_out = True return valid_index, all_out def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ valid_index, all_out = self._valid_index(input_shape, inputs) valid_inputs = [] if not all_out: for _input in inputs: if input_shape: valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[ valid_index ] if np.isscalar(_input): valid_input = valid_input.item(0) valid_inputs.append(valid_input) else: valid_inputs.append(_input) return tuple(valid_inputs), valid_index, all_out _BaseSelectorArgument = namedtuple("_BaseSelectorArgument", "index ignore") class _SelectorArgument(_BaseSelectorArgument): """ Contains a single CompoundBoundingBox slicing input. Parameters ---------- index : int The index of the input in the input list ignore : bool Whether or not this input will be ignored by the bounding box. 
Methods ------- validate : Returns a valid SelectorArgument for a given model. get_selector : Returns the value of the input for use in finding the correct bounding_box. get_fixed_value : Gets the slicing value from a fix_inputs set of values. """ def __new__(cls, index, ignore): self = super().__new__(cls, index, ignore) return self @classmethod def validate(cls, model, argument, ignored: bool = True): """ Construct a valid selector argument for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be an argument for. argument : int or str A representation of which evaluation input to use ignored : optional, bool Whether or not to ignore this argument in the ModelBoundingBox. Returns ------- Validated selector_argument """ return cls(get_index(model, argument), ignored) def get_selector(self, *inputs): """ Get the selector value corresponding to this argument Parameters ---------- *inputs : All the processed model evaluation inputs. """ _selector = inputs[self.index] if isiterable(_selector): if len(_selector) == 1: return _selector[0] else: return tuple(_selector) return _selector def name(self, model) -> str: """ Get the name of the input described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return get_name(model, self.index) def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return f"Argument(name='{self.name(model)}', ignore={self.ignore})" def get_fixed_value(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. values : dict Dictionary of fixed inputs. """ if self.index in values: return values[self.index] else: if self.name(model) in values: return values[self.name(model)] else: raise RuntimeError( f"{self.pretty_repr(model)} was not found in {values}" ) def is_argument(self, model, argument) -> bool: """ Determine if passed argument is described by this selector argument Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. argument : int or str A representation of which evaluation input is being used """ return self.index == get_index(model, argument) def named_tuple(self, model): """ Get a tuple representation of this argument using the input name from the model. Parameters ---------- model : `~astropy.modeling.Model` The Model this selector argument is for. """ return (self.name(model), self.ignore) class _SelectorArguments(tuple): """ Contains the CompoundBoundingBox slicing description Parameters ---------- input_ : The SelectorArgument values Methods ------- validate : Returns a valid SelectorArguments for its model. get_selector : Returns the selector a set of inputs corresponds to. is_selector : Determines if a selector is correctly formatted for this CompoundBoundingBox. get_fixed_value : Gets the selector from a fix_inputs set of values. 
""" _kept_ignore = None def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None): self = super().__new__(cls, input_) if kept_ignore is None: self._kept_ignore = [] else: self._kept_ignore = kept_ignore return self def pretty_repr(self, model): """ Get a pretty-print representation of this object Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. """ parts = ["SelectorArguments("] for argument in self: parts.append(f" {argument.pretty_repr(model)}") parts.append(")") return "\n".join(parts) @property def ignore(self): """Get the list of ignored inputs""" ignore = [argument.index for argument in self if argument.ignore] ignore.extend(self._kept_ignore) return ignore @property def kept_ignore(self): """The arguments to persist in ignoring""" return self._kept_ignore @classmethod def validate(cls, model, arguments, kept_ignore: List = None): """ Construct a valid Selector description for a CompoundBoundingBox. Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. arguments : The individual argument informations kept_ignore : Arguments to persist as ignored """ inputs = [] for argument in arguments: _input = _SelectorArgument.validate(model, *argument) if _input.index in [this.index for this in inputs]: raise ValueError( f"Input: '{get_name(model, _input.index)}' has been repeated." ) inputs.append(_input) if len(inputs) == 0: raise ValueError("There must be at least one selector argument.") if isinstance(arguments, _SelectorArguments): if kept_ignore is None: kept_ignore = [] kept_ignore.extend(arguments.kept_ignore) return cls(tuple(inputs), kept_ignore) def get_selector(self, *inputs): """ Get the selector corresponding to these inputs Parameters ---------- *inputs : All the processed model evaluation inputs. """ return tuple(argument.get_selector(*inputs) for argument in self) def is_selector(self, _selector): """ Determine if this is a reasonable selector Parameters ---------- _selector : tuple The selector to check """ return isinstance(_selector, tuple) and len(_selector) == len(self) def get_fixed_values(self, model, values: dict): """ Gets the value fixed input corresponding to this argument Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. values : dict Dictionary of fixed inputs. """ return tuple(argument.get_fixed_value(model, values) for argument in self) def is_argument(self, model, argument) -> bool: """ Determine if passed argument is one of the selector arguments Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which evaluation input is being used """ for selector_arg in self: if selector_arg.is_argument(model, argument): return True else: return False def selector_index(self, model, argument): """ Get the index of the argument passed in the selector tuples Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ for index, selector_arg in enumerate(self): if selector_arg.is_argument(model, argument): return index else: raise ValueError( f"{argument} does not correspond to any selector argument." ) def reduce(self, model, argument): """ Reduce the selector arguments by the argument given Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. 
argument : int or str A representation of which argument is being used """ arguments = list(self) kept_ignore = [arguments.pop(self.selector_index(model, argument)).index] kept_ignore.extend(self._kept_ignore) return _SelectorArguments.validate(model, tuple(arguments), kept_ignore) def add_ignore(self, model, argument): """ Add argument to the kept_ignore list Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. argument : int or str A representation of which argument is being used """ if self.is_argument(model, argument): raise ValueError( f"{argument}: is a selector argument and cannot be ignored." ) kept_ignore = [get_index(model, argument)] return _SelectorArguments.validate(model, self, kept_ignore) def named_tuple(self, model): """ Get a tuple of selector argument tuples using input names Parameters ---------- model : `~astropy.modeling.Model` The Model these selector arguments are for. """ return tuple(selector_arg.named_tuple(model) for selector_arg in self) class CompoundBoundingBox(_BoundingDomain): """ A model's compound bounding box Parameters ---------- bounding_boxes : dict A dictionary containing all the ModelBoundingBoxes that are possible keys -> _selector (extracted from model inputs) values -> ModelBoundingBox model : `~astropy.modeling.Model` The Model this compound bounding_box is for. selector_args : _SelectorArguments A description of how to extract the selectors from model inputs. create_selector : optional A method which takes in the selector and the model to return a valid bounding corresponding to that selector. This can be used to construct new bounding_boxes for previously undefined selectors. These new boxes are then stored for future lookups. order : optional, str The ordering that is assumed for the tuple representation of the bounding_boxes. 
""" def __init__( self, bounding_boxes: Dict[Any, ModelBoundingBox], model, selector_args: _SelectorArguments, create_selector: Callable = None, ignored: List[int] = None, order: str = "C", ): super().__init__(model, ignored, order) self._create_selector = create_selector self._selector_args = _SelectorArguments.validate(model, selector_args) self._bounding_boxes = {} self._validate(bounding_boxes) def copy(self): bounding_boxes = { selector: bbox.copy(self.selector_args.ignore) for selector, bbox in self._bounding_boxes.items() } return CompoundBoundingBox( bounding_boxes, self._model, selector_args=self._selector_args, create_selector=copy.deepcopy(self._create_selector), order=self._order, ) def __repr__(self): parts = ["CompoundBoundingBox(", " bounding_boxes={"] # bounding_boxes for _selector, bbox in self._bounding_boxes.items(): bbox_repr = bbox.__repr__().split("\n") parts.append(f" {_selector} = {bbox_repr.pop(0)}") for part in bbox_repr: parts.append(f" {part}") parts.append(" }") # selector_args selector_args_repr = self.selector_args.pretty_repr(self._model).split("\n") parts.append(f" selector_args = {selector_args_repr.pop(0)}") for part in selector_args_repr: parts.append(f" {part}") parts.append(")") return "\n".join(parts) @property def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]: return self._bounding_boxes @property def selector_args(self) -> _SelectorArguments: return self._selector_args @selector_args.setter def selector_args(self, value): self._selector_args = _SelectorArguments.validate(self._model, value) warnings.warn( "Overriding selector_args may cause problems you should re-validate " "the compound bounding box before use!", RuntimeWarning, ) @property def named_selector_tuple(self) -> tuple: return self._selector_args.named_tuple(self._model) @property def create_selector(self): return self._create_selector @staticmethod def _get_selector_key(key): if isiterable(key): return tuple(key) else: return (key,) def __setitem__(self, key, value): _selector = self._get_selector_key(key) if not self.selector_args.is_selector(_selector): raise ValueError(f"{_selector} is not a selector!") ignored = self.selector_args.ignore + self.ignored self._bounding_boxes[_selector] = ModelBoundingBox.validate( self._model, value, ignored, order=self._order ) def _validate(self, bounding_boxes: dict): for _selector, bounding_box in bounding_boxes.items(): self[_selector] = bounding_box def __eq__(self, value): if isinstance(value, CompoundBoundingBox): return ( self.bounding_boxes == value.bounding_boxes and self.selector_args == value.selector_args and self.create_selector == value.create_selector ) else: return False @classmethod def validate( cls, model, bounding_box: dict, selector_args=None, create_selector=None, ignored: list = None, order: str = "C", _preserve_ignore: bool = False, **kwarg, ): """ Construct a valid compound bounding box for a model. 
Parameters ---------- model : `~astropy.modeling.Model` The model for which this will be a bounding_box bounding_box : dict Dictionary of possible bounding_box respresentations selector_args : optional Description of the selector arguments create_selector : optional, callable Method for generating new selectors order : optional, str The order that a tuple representation will be assumed to be Default: 'C' """ if isinstance(bounding_box, CompoundBoundingBox): if selector_args is None: selector_args = bounding_box.selector_args if create_selector is None: create_selector = bounding_box.create_selector order = bounding_box.order if _preserve_ignore: ignored = bounding_box.ignored bounding_box = bounding_box.bounding_boxes if selector_args is None: raise ValueError( "Selector arguments must be provided " "(can be passed as part of bounding_box argument)" ) return cls( bounding_box, model, selector_args, create_selector=create_selector, ignored=ignored, order=order, ) def __contains__(self, key): return key in self._bounding_boxes def _create_bounding_box(self, _selector): self[_selector] = self._create_selector(_selector, model=self._model) return self[_selector] def __getitem__(self, key): _selector = self._get_selector_key(key) if _selector in self: return self._bounding_boxes[_selector] elif self._create_selector is not None: return self._create_bounding_box(_selector) else: raise RuntimeError(f"No bounding box is defined for selector: {_selector}.") def _select_bounding_box(self, inputs) -> ModelBoundingBox: _selector = self.selector_args.get_selector(*inputs) return self[_selector] def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]: """ Get prepare the inputs with respect to the bounding box. Parameters ---------- input_shape : tuple The shape that all inputs have be reshaped/broadcasted into inputs : list List of all the model inputs Returns ------- valid_inputs : list The inputs reduced to just those inputs which are all inside their respective bounding box intervals valid_index : array_like array of all indices inside the bounding box all_out: bool if all of the inputs are outside the bounding_box """ bounding_box = self._select_bounding_box(inputs) return bounding_box.prepare_inputs(input_shape, inputs) def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]: selector_index = self.selector_args.selector_index(self._model, argument) matching = {} for selector_key, bbox in self._bounding_boxes.items(): if selector_key[selector_index] == value: new_selector_key = list(selector_key) new_selector_key.pop(selector_index) if bbox.has_interval(argument): new_bbox = bbox.fix_inputs( self._model, {argument: value}, _keep_ignored=True ) else: new_bbox = bbox.copy() matching[tuple(new_selector_key)] = new_bbox if len(matching) == 0: raise ValueError( f"Attempting to fix input {argument}, but there are no " f"bounding boxes for argument value {value}." 
) return matching def _fix_input_selector_arg(self, argument, value): matching_bounding_boxes = self._matching_bounding_boxes(argument, value) if len(self.selector_args) == 1: return matching_bounding_boxes[()] else: return CompoundBoundingBox( matching_bounding_boxes, self._model, self.selector_args.reduce(self._model, argument), ) def _fix_input_bbox_arg(self, argument, value): bounding_boxes = {} for selector_key, bbox in self._bounding_boxes.items(): bounding_boxes[selector_key] = bbox.fix_inputs( self._model, {argument: value}, _keep_ignored=True ) return CompoundBoundingBox( bounding_boxes, self._model, self.selector_args.add_ignore(self._model, argument), ) def fix_inputs(self, model, fixed_inputs: dict): """ Fix the bounding_box for a `fix_inputs` compound model. Parameters ---------- model : `~astropy.modeling.Model` The new model for which this will be a bounding_box fixed_inputs : dict Dictionary of inputs which have been fixed by this bounding box. """ fixed_input_keys = list(fixed_inputs.keys()) argument = fixed_input_keys.pop() value = fixed_inputs[argument] if self.selector_args.is_argument(self._model, argument): bbox = self._fix_input_selector_arg(argument, value) else: bbox = self._fix_input_bbox_arg(argument, value) if len(fixed_input_keys) > 0: new_fixed_inputs = fixed_inputs.copy() del new_fixed_inputs[argument] bbox = bbox.fix_inputs(model, new_fixed_inputs) if isinstance(bbox, CompoundBoundingBox): selector_args = bbox.named_selector_tuple bbox_dict = bbox elif isinstance(bbox, ModelBoundingBox): selector_args = None bbox_dict = bbox.named_intervals return bbox.__class__.validate( model, bbox_dict, order=bbox.order, selector_args=selector_args )
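# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): attaching a bounding
# box to a model and letting the evaluation machinery above fill points that
# fall outside it.  ``Gaussian1D`` is a real astropy model; the interval
# values are arbitrary.
if __name__ == "__main__":
    from astropy.modeling.models import Gaussian1D

    model = Gaussian1D(amplitude=1, mean=0, stddev=1)
    # The tuple is validated into a ModelBoundingBox with a single interval.
    model.bounding_box = (-1, 1)
    print(model.bounding_box)

    # With ``with_bounding_box=True`` outputs for inputs outside [-1, 1] are
    # replaced by the fill value (NaN by default).
    print(model(np.array([-2.0, 0.0, 2.0]), with_bounding_box=True))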
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Mathematical models.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import numpy as np from astropy import units as u from astropy.units import Quantity, UnitsError from .core import Fittable1DModel, Fittable2DModel from .parameters import InputParameterError, Parameter from .utils import ellipse_extent __all__ = [ "AiryDisk2D", "Moffat1D", "Moffat2D", "Box1D", "Box2D", "Const1D", "Const2D", "Ellipse2D", "Disk2D", "Gaussian1D", "Gaussian2D", "Linear1D", "Lorentz1D", "RickerWavelet1D", "RickerWavelet2D", "RedshiftScaleFactor", "Multiply", "Planar2D", "Scale", "Sersic1D", "Sersic2D", "Shift", "Sine1D", "Cosine1D", "Tangent1D", "ArcSine1D", "ArcCosine1D", "ArcTangent1D", "Trapezoid1D", "TrapezoidDisk2D", "Ring2D", "Voigt1D", "KingProjectedAnalytic1D", "Exponential1D", "Logarithmic1D", ] TWOPI = 2 * np.pi FLOAT_EPSILON = float(np.finfo(np.float32).tiny) # Note that we define this here rather than using the value defined in # astropy.stats to avoid importing astropy.stats every time astropy.modeling # is loaded. GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0)) class Gaussian1D(Fittable1DModel): """ One dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian - for a normalized profile (integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi)) mean : float or `~astropy.units.Quantity`. Mean of the Gaussian. stddev : float or `~astropy.units.Quantity`. Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)). Notes ----- Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}} Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that 'mean' is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian1D plt.figure() s1 = Gaussian1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() See Also -------- Gaussian2D, Box1D, Moffat1D, Lorentz1D """ amplitude = Parameter( default=1, description="Amplitude (peak value) of the Gaussian" ) mean = Parameter(default=0, description="Position of peak (Gaussian)") # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. 
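    # FLOAT_EPSILON (a tiny positive float32 value defined near the top of
    # this module) serves as the lower bound, so fitters cannot drive the
    # width to zero or below.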
stddev = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian", ) def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` Parameters ---------- factor : float The multiple of `stddev` used to define the limits. The default is 5.5, corresponding to a relative error < 1e-7. Examples -------- >>> from astropy.modeling.models import Gaussian1D >>> model = Gaussian1D(mean=0, stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-11.0, upper=11.0) } model=Gaussian1D(inputs=('x',)) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor, like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-4.0, upper=4.0) } model=Gaussian1D(inputs=('x',)) order='C' ) """ x0 = self.mean dx = factor * self.stddev return (x0 - dx, x0 + dx) @property def fwhm(self): """Gaussian full width at half maximum.""" return self.stddev * GAUSSIAN_SIGMA_TO_FWHM @staticmethod def evaluate(x, amplitude, mean, stddev): """ Gaussian1D model function. """ return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2) @staticmethod def fit_deriv(x, amplitude, mean, stddev): """ Gaussian1D model function derivatives. """ d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2) d_mean = amplitude * d_amplitude * (x - mean) / stddev**2 d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3 return [d_amplitude, d_mean, d_stddev] @property def input_units(self): if self.mean.unit is None: return None return {self.inputs[0]: self.mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "mean": inputs_unit[self.inputs[0]], "stddev": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Gaussian2D(Fittable2DModel): r""" Two dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian. x_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in x. y_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in y. x_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in x before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). y_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in y before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). theta : float or `~astropy.units.Quantity`, optional. The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise. Must be `None` if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, `None` means the default value (0). cov_matrix : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults. Notes ----- Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. 
math:: f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right) \left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}} Using the following definitions: .. math:: a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} - \frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right) c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) If using a ``cov_matrix``, the model is of the form: .. math:: f(x, y) = A e^{-0.5 \left( \vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0} \right)} where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`, and :math:`\Sigma` is the covariance matrix: .. math:: \Sigma = \left(\begin{array}{ccc} \sigma_x^2 & \rho \sigma_x \sigma_y \\ \rho \sigma_x \sigma_y & \sigma_y^2 \end{array}\right) :math:`\rho` is the correlation between ``x`` and ``y``, which should be between -1 and +1. Positive correlation corresponds to a ``theta`` in the range 0 to 90 degrees. Negative correlation corresponds to a ``theta`` in the range of 0 to -90 degrees. See [1]_ for more details about the 2D Gaussian function. See Also -------- Gaussian1D, Box2D, Moffat2D References ---------- .. [1] https://en.wikipedia.org/wiki/Gaussian_function """ amplitude = Parameter(default=1, description="Amplitude of the Gaussian") x_mean = Parameter( default=0, description="Peak position (along x axis) of Gaussian" ) y_mean = Parameter( default=0, description="Peak position (along y axis) of Gaussian" ) x_stddev = Parameter( default=1, description="Standard deviation of the Gaussian (along x axis)" ) y_stddev = Parameter( default=1, description="Standard deviation of the Gaussian (along y axis)" ) theta = Parameter( default=0.0, description=( "Rotation angle either as a " "float (in radians) or a " "|Quantity| angle (optional)" ), ) def __init__( self, amplitude=amplitude.default, x_mean=x_mean.default, y_mean=y_mean.default, x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, **kwargs, ): if cov_matrix is None: if x_stddev is None: x_stddev = self.__class__.x_stddev.default if y_stddev is None: y_stddev = self.__class__.y_stddev.default if theta is None: theta = self.__class__.theta.default else: if x_stddev is not None or y_stddev is not None or theta is not None: raise InputParameterError( "Cannot specify both cov_matrix and x/y_stddev/theta" ) # Compute principle coordinate system transformation cov_matrix = np.array(cov_matrix) if cov_matrix.shape != (2, 2): raise ValueError("Covariance matrix must be 2x2") eig_vals, eig_vecs = np.linalg.eig(cov_matrix) x_stddev, y_stddev = np.sqrt(eig_vals) y_vec = eig_vecs[:, 0] theta = np.arctan2(y_vec[1], y_vec[0]) # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. # TODO: Investigate why setting this in Parameter above causes # convolution tests to hang. 
kwargs.setdefault("bounds", {}) kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None)) kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None)) super().__init__( amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs, ) @property def x_fwhm(self): """Gaussian full width at half maximum in X.""" return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM @property def y_fwhm(self): """Gaussian full width at half maximum in Y.""" return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits in each dimension, ``((y_low, y_high), (x_low, x_high))`` The default offset from the mean is 5.5-sigma, corresponding to a relative error < 1e-7. The limits are adjusted for rotation. Parameters ---------- factor : float, optional The multiple of `x_stddev` and `y_stddev` used to define the limits. The default is 5.5. Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-5.5, upper=5.5) y: Interval(lower=-11.0, upper=11.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-2.0, upper=2.0) y: Interval(lower=-4.0, upper=4.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) """ a = factor * self.x_stddev b = factor * self.y_stddev dx, dy = ellipse_extent(a, b, self.theta) return ( (self.y_mean - dy, self.y_mean + dy), (self.x_mean - dx, self.x_mean + dx), ) @staticmethod def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function""" cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 sin2t = np.sin(2.0 * theta) xstd2 = x_stddev**2 ystd2 = y_stddev**2 xdiff = x - x_mean ydiff = y - y_mean a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) return amplitude * np.exp( -((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2)) ) @staticmethod def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function derivative with respect to parameters""" cost = np.cos(theta) sint = np.sin(theta) cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 cos2t = np.cos(2.0 * theta) sin2t = np.sin(2.0 * theta) xstd2 = x_stddev**2 ystd2 = y_stddev**2 xstd3 = x_stddev**3 ystd3 = y_stddev**3 xdiff = x - x_mean ydiff = y - y_mean xdiff2 = xdiff**2 ydiff2 = ydiff**2 a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2))) da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2)) da_dx_stddev = -cost2 / xstd3 da_dy_stddev = -sint2 / ystd3 db_dtheta = (cos2t / xstd2) - (cos2t / ystd2) db_dx_stddev = -sin2t / xstd3 db_dy_stddev = sin2t / ystd3 dc_dtheta = -da_dtheta dc_dx_stddev = -sint2 / xstd3 dc_dy_stddev = -cost2 / ystd3 dg_dA = g / amplitude dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff)) dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff)) dg_dx_stddev = g * ( -( da_dx_stddev * xdiff2 + db_dx_stddev * xdiff * ydiff + dc_dx_stddev * ydiff2 ) ) dg_dy_stddev = 
g * ( -( da_dy_stddev * xdiff2 + db_dy_stddev * xdiff * ydiff + dc_dy_stddev * ydiff2 ) ) dg_dtheta = g * ( -(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2) ) return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta] @property def input_units(self): if self.x_mean.unit is None and self.y_mean.unit is None: return None return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_mean": inputs_unit[self.inputs[0]], "y_mean": inputs_unit[self.inputs[0]], "x_stddev": inputs_unit[self.inputs[0]], "y_stddev": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class Shift(Fittable1DModel): """ Shift a coordinate. Parameters ---------- offset : float Offset to add to a coordinate. """ offset = Parameter(default=0, description="Offset to add to a model") linear = True _has_inverse_bounding_box = True @property def input_units(self): if self.offset.unit is None: return None return {self.inputs[0]: self.offset.unit} @property def inverse(self): """One dimensional inverse Shift model function""" inv = self.copy() inv.offset *= -1 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.offset) for x in self.bounding_box ) return inv @staticmethod def evaluate(x, offset): """One dimensional Shift model function""" return x + offset @staticmethod def sum_of_implicit_terms(x): """Evaluate the implicit term (x) of one dimensional Shift model""" return x @staticmethod def fit_deriv(x, *params): """One dimensional Shift model derivative with respect to parameter""" d_offset = np.ones_like(x) return [d_offset] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"offset": outputs_unit[self.outputs[0]]} class Scale(Fittable1DModel): """ Multiply a model by a dimensionless factor. Parameters ---------- factor : float Factor by which to scale a coordinate. Notes ----- If ``factor`` is a `~astropy.units.Quantity` then the units will be stripped before the scaling operation. """ factor = Parameter(default=1, description="Factor by which to scale a model") linear = True fittable = True _input_units_strict = True _input_units_allow_dimensionless = True _has_inverse_bounding_box = True @property def input_units(self): if self.factor.unit is None: return None return {self.inputs[0]: self.factor.unit} @property def inverse(self): """One dimensional inverse Scale model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box() ) return inv @staticmethod def evaluate(x, factor): """One dimensional Scale model function""" if isinstance(factor, u.Quantity): factor = factor.value return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional Scale model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"factor": outputs_unit[self.outputs[0]]} class Multiply(Fittable1DModel): """ Multiply a model by a quantity or number. 
Parameters ---------- factor : float Factor by which to multiply a coordinate. """ factor = Parameter(default=1, description="Factor by which to multiply a model") linear = True fittable = True _has_inverse_bounding_box = True @property def inverse(self): """One dimensional inverse multiply model function""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box() ) return inv @staticmethod def evaluate(x, factor): """One dimensional multiply model function""" return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional multiply model derivative with respect to parameter""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"factor": outputs_unit[self.outputs[0]]} class RedshiftScaleFactor(Fittable1DModel): """ One dimensional redshift scale factor model. Parameters ---------- z : float Redshift value. Notes ----- Model formula: .. math:: f(x) = x (1 + z) """ z = Parameter(description="Redshift", default=0) _has_inverse_bounding_box = True @staticmethod def evaluate(x, z): """One dimensional RedshiftScaleFactor model function""" return (1 + z) * x @staticmethod def fit_deriv(x, z): """One dimensional RedshiftScaleFactor model derivative""" d_z = x return [d_z] @property def inverse(self): """Inverse RedshiftScaleFactor model""" inv = self.copy() inv.z = 1.0 / (1.0 + self.z) - 1.0 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.z) for x in self.bounding_box.bounding_box() ) return inv class Sersic1D(Fittable1DModel): r""" One dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. See Also -------- Gaussian1D, Moffat1D, Lorentz1D Notes ----- Model formula: .. math:: I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (b_n,2n) Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic1D import matplotlib.pyplot as plt plt.figure() plt.subplot(111, xscale='log', yscale='log') s1 = Sersic1D(amplitude=1, r_eff=5) r=np.arange(0, 100, .01) for n in range(1, 10): s1.n = n plt.plot(r, s1(r), color=str(float(n) / 15)) plt.axis([1e-1, 30, 1e-2, 1e3]) plt.xlabel('log Radius') plt.ylabel('log Surface Brightness') plt.text(.25, 1.5, 'n=1') plt.text(.25, 300, 'n=10') plt.xticks([]) plt.yticks([]) plt.show() References ---------- .. 
[1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") _gammaincinv = None @classmethod def evaluate(cls, r, amplitude, r_eff, n): """One dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv return amplitude * np.exp( -cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1) ) @property def input_units(self): if self.r_eff.unit is None: return None return {self.inputs[0]: self.r_eff.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "r_eff": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class _Trigonometric1D(Fittable1DModel): """ Base class for one dimensional trigonometric and inverse trigonometric models Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase """ amplitude = Parameter(default=1, description="Oscillation amplitude") frequency = Parameter(default=1, description="Oscillation frequency") phase = Parameter(default=0, description="Oscillation phase") @property def input_units(self): if self.frequency.unit is None: return None return {self.inputs[0]: 1.0 / self.frequency.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "frequency": inputs_unit[self.inputs[0]] ** -1, "amplitude": outputs_unit[self.outputs[0]], } class Sine1D(_Trigonometric1D): """ One dimensional Sine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Sine1D plt.figure() s1 = Sine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Sine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.sin(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Sine model derivative""" d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase) d_frequency = ( TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase) ) d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Sine""" return ArcSine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class Cosine1D(_Trigonometric1D): """ One dimensional Cosine model. 
    Parameters
    ----------
    amplitude : float
        Oscillation amplitude
    frequency : float
        Oscillation frequency
    phase : float
        Oscillation phase

    See Also
    --------
    ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D

    Notes
    -----
    Model formula:

        .. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import Cosine1D

        plt.figure()
        s1 = Cosine1D(amplitude=1, frequency=.25)
        r = np.arange(0, 10, .01)

        for amplitude in range(1, 4):
            s1.amplitude = amplitude
            plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
        plt.axis([0, 10, -5, 5])
        plt.show()
    """

    @staticmethod
    def evaluate(x, amplitude, frequency, phase):
        """One dimensional Cosine model function"""
        # Note: If frequency and x are quantities, they should normally have
        # inverse units, so that argument ends up being dimensionless. However,
        # np.cos of a dimensionless quantity will crash, so we remove the
        # quantity-ness from argument in this case (another option would be to
        # multiply by * u.rad but this would be slower overall).
        argument = TWOPI * (frequency * x + phase)
        if isinstance(argument, Quantity):
            argument = argument.value

        return amplitude * np.cos(argument)

    @staticmethod
    def fit_deriv(x, amplitude, frequency, phase):
        """One dimensional Cosine model derivative"""
        d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
        d_frequency = -(
            TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
        )
        d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
        return [d_amplitude, d_frequency, d_phase]

    @property
    def inverse(self):
        """One dimensional inverse of Cosine"""
        return ArcCosine1D(
            amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
        )


class Tangent1D(_Trigonometric1D):
    """
    One dimensional Tangent model.

    Parameters
    ----------
    amplitude : float
        Oscillation amplitude
    frequency : float
        Oscillation frequency
    phase : float
        Oscillation phase

    See Also
    --------
    Sine1D, Cosine1D, Const1D, Linear1D

    Notes
    -----
    Model formula:

        .. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)

    Note that the tangent function is undefined for inputs of the form
    pi/2 + n*pi for all integers n. Thus the default bounding box has been
    restricted to:

        .. math:: [(-1/4 - p)/f, (1/4 - p)/f]

    which is the smallest interval on which the tangent function is
    continuous.

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import Tangent1D

        plt.figure()
        s1 = Tangent1D(amplitude=1, frequency=.25)
        r = np.arange(0, 10, .01)

        for amplitude in range(1, 4):
            s1.amplitude = amplitude
            plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
        plt.axis([0, 10, -5, 5])
        plt.show()
    """

    @staticmethod
    def evaluate(x, amplitude, frequency, phase):
        """One dimensional Tangent model function"""
        # Note: If frequency and x are quantities, they should normally have
        # inverse units, so that argument ends up being dimensionless. However,
        # np.tan of a dimensionless quantity will crash, so we remove the
        # quantity-ness from argument in this case (another option would be to
        # multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.tan(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Tangent model derivative""" sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2 d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase) d_frequency = TWOPI * x * amplitude * sec d_phase = TWOPI * amplitude * sec return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Tangent""" return ArcTangent1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ bbox = [ (-1 / 4 - self.phase) / self.frequency, (1 / 4 - self.phase) / self.frequency, ] if self.frequency.unit is not None: bbox = bbox / self.frequency.unit return bbox class _InverseTrigonometric1D(_Trigonometric1D): """ Base class for one dimensional inverse trigonometric models """ @property def input_units(self): if self.amplitude.unit is None: return None return {self.inputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "frequency": outputs_unit[self.outputs[0]] ** -1, "amplitude": inputs_unit[self.inputs[0]], } class ArcSine1D(_InverseTrigonometric1D): """ One dimensional ArcSine model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Sine frequency : float Oscillation frequency for corresponding Sine phase : float Oscillation phase for corresponding Sine See Also -------- Sine1D, ArcCosine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f The arcsin function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcSine1D plt.figure() s1 = ArcSine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcSine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_sine = np.arcsin(argument) / TWOPI return (arc_sine - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcSine model derivative""" d_amplitude = -x / ( TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2) ) d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcSine""" return Sine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class ArcCosine1D(_InverseTrigonometric1D): """ One dimensional ArcCosine returning values between 0 and pi only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Cosine frequency : float Oscillation frequency for corresponding Cosine phase : float Oscillation phase for corresponding Cosine See Also -------- Cosine1D, ArcSine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f The arccos function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcCosine1D plt.figure() s1 = ArcCosine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, 0, np.pi]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcCosine model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arccos(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcCosine model derivative""" d_amplitude = x / ( TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2) ) d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)`` """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcCosine""" return Cosine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class ArcTangent1D(_InverseTrigonometric1D): """ One dimensional ArcTangent model returning values between -pi/2 and pi/2 only. 
Parameters ---------- amplitude : float Oscillation amplitude for corresponding Tangent frequency : float Oscillation frequency for corresponding Tangent phase : float Oscillation phase for corresponding Tangent See Also -------- Tangent1D, ArcSine1D, ArcCosine1D Notes ----- Model formula: .. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcTangent1D plt.figure() s1 = ArcTangent1D(amplitude=1, frequency=.25) r=np.arange(-10, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-10, 10, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcTangent model function""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arctan(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcTangent model derivative""" d_amplitude = -x / ( TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2) ) d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of ArcTangent""" return Tangent1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class Linear1D(Fittable1DModel): """ One dimensional Line model. Parameters ---------- slope : float Slope of the straight line intercept : float Intercept of the straight line See Also -------- Const1D Notes ----- Model formula: .. math:: f(x) = a x + b """ slope = Parameter(default=1, description="Slope of the straight line") intercept = Parameter(default=0, description="Intercept of the straight line") linear = True @staticmethod def evaluate(x, slope, intercept): """One dimensional Line model function""" return slope * x + intercept @staticmethod def fit_deriv(x, *params): """One dimensional Line model derivative with respect to parameters""" d_slope = x d_intercept = np.ones_like(x) return [d_slope, d_intercept] @property def inverse(self): new_slope = self.slope**-1 new_intercept = -self.intercept / self.slope return self.__class__(slope=new_slope, intercept=new_intercept) @property def input_units(self): if self.intercept.unit is None and self.slope.unit is None: return None return {self.inputs[0]: self.intercept.unit / self.slope.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "intercept": outputs_unit[self.outputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], } class Planar2D(Fittable2DModel): """ Two dimensional Plane model. Parameters ---------- slope_x : float Slope of the plane in X slope_y : float Slope of the plane in Y intercept : float Z-intercept of the plane Notes ----- Model formula: .. 
math:: f(x, y) = a x + b y + c """ slope_x = Parameter(default=1, description="Slope of the plane in X") slope_y = Parameter(default=1, description="Slope of the plane in Y") intercept = Parameter(default=0, description="Z-intercept of the plane") linear = True @staticmethod def evaluate(x, y, slope_x, slope_y, intercept): """Two dimensional Plane model function""" return slope_x * x + slope_y * y + intercept @staticmethod def fit_deriv(x, y, *params): """Two dimensional Plane model derivative with respect to parameters""" d_slope_x = x d_slope_y = y d_intercept = np.ones_like(x) return [d_slope_x, d_slope_y, d_intercept] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "intercept": outputs_unit["z"], "slope_x": outputs_unit["z"] / inputs_unit["x"], "slope_y": outputs_unit["z"] / inputs_unit["y"], } class Lorentz1D(Fittable1DModel): """ One dimensional Lorentzian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Peak value - for a normalized profile (integrating to 1), set amplitude = 2 / (np.pi * fwhm) x_0 : float or `~astropy.units.Quantity`. Position of the peak fwhm : float or `~astropy.units.Quantity`. Full width at half maximum (FWHM) See Also -------- Gaussian1D, Box1D, RickerWavelet1D Notes ----- Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}} where :math:`\\gamma` is half of given FWHM. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Lorentz1D plt.figure() s1 = Lorentz1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Peak value") x_0 = Parameter(default=0, description="Position of the peak") fwhm = Parameter(default=1, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model function""" return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model derivative with respect to parameters""" d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2) d_x_0 = ( amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2) ) d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] def bounding_box(self, factor=25): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. Default is chosen to include most (99%) of the area under the curve, while still showing the central feature of interest. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Voigt1D(Fittable1DModel): """ One dimensional model for the Voigt profile. Parameters ---------- x_0 : float or `~astropy.units.Quantity` Position of the peak amplitude_L : float or `~astropy.units.Quantity`. 
The Lorentzian amplitude (peak of the associated Lorentz function) - for a normalized profile (integrating to 1), set amplitude_L = 2 / (np.pi * fwhm_L) fwhm_L : float or `~astropy.units.Quantity` The Lorentzian full width at half maximum fwhm_G : float or `~astropy.units.Quantity`. The Gaussian full width at half maximum method : str, optional Algorithm for computing the complex error function; one of 'Humlicek2' (default, fast and generally more accurate than ``rtol=3.e-5``) or 'Scipy', alternatively 'wofz' (requires ``scipy``, almost as fast and reference in accuracy). See Also -------- Gaussian1D, Lorentz1D Notes ----- Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided consistently with compatible units or as unitless numbers. Voigt function is calculated as real part of the complex error function computed from either Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or `~scipy.special.wofz` (implementing 'Faddeeva.cc'). Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Voigt1D import matplotlib.pyplot as plt plt.figure() x = np.arange(0, 10, 0.01) v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) plt.plot(x, v1(x)) plt.show() """ x_0 = Parameter(default=0, description="Position of the peak") amplitude_L = Parameter(default=1, description="The Lorentzian amplitude") fwhm_L = Parameter( default=2 / np.pi, description="The Lorentzian full width at half maximum" ) fwhm_G = Parameter( default=np.log(2), description="The Gaussian full width at half maximum" ) sqrt_pi = np.sqrt(np.pi) sqrt_ln2 = np.sqrt(np.log(2)) sqrt_ln2pi = np.sqrt(np.log(2) * np.pi) _last_z = np.zeros(1, dtype=complex) _last_w = np.zeros(1, dtype=float) _faddeeva = None def __init__( self, x_0=x_0.default, amplitude_L=amplitude_L.default, fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method="humlicek2", **kwargs, ): if str(method).lower() in ("wofz", "scipy"): from scipy.special import wofz self._faddeeva = wofz elif str(method).lower() == "humlicek2": self._faddeeva = self._hum2zpf16c else: raise ValueError( f"Not a valid method for Voigt1D Faddeeva function: {method}." ) self.method = self._faddeeva.__name__ super().__init__( x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs ) def _wrap_wofz(self, z): """Call complex error (Faddeeva) function w(z) implemented by algorithm `method`; cache results for consecutive calls from `evaluate`, `fit_deriv`.""" if z.shape == self._last_z.shape and np.allclose( z, self._last_z, rtol=1.0e-14, atol=1.0e-15 ): return self._last_w self._last_w = self._faddeeva(z) self._last_z = z return self._last_w def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): """One dimensional Voigt function scaled to Lorentz peak amplitude.""" z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G # The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ; # for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): """ Derivative of the one dimensional Voigt function with respect to parameters. 
""" s = self.sqrt_ln2 / fwhm_G z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s # V * constant from McLean implementation (== their Voigt function) w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi # Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L) dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L return [ -dwdz.real * 2 * s, w.real / amplitude_L, w.real / fwhm_L - dwdz.imag * s, (-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G, ] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm_L": inputs_unit[self.inputs[0]], "fwhm_G": inputs_unit[self.inputs[0]], "amplitude_L": outputs_unit[self.outputs[0]], } @staticmethod def _hum2zpf16c(z, s=10.0): """Complex error function w(z) for z = x + iy combining Humlicek's rational approximations: |x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II; else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35 Version using a mask and np.place; single complex argument version of Franz Schreier's cpfX.hum2zpf16m. Originally licensed under a 3-clause BSD style license - see https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py """ # Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35 # fmt: off AA = np.array( [ +46236.3358828121, -147726.58393079657j, -206562.80451354137, 281369.1590631087j, +183092.74968253175, -184787.96830696272j, -66155.39578477248, 57778.05827983565j, +11682.770904216826, -9442.402767960672j, -1052.8438624933142, 814.0996198624186j, +45.94499030751872, -34.59751573708725j, -0.7616559377907136, 0.5641895835476449j, ] ) # 1j/sqrt(pi) to the 12. digit bb = np.array( [ +7918.06640624997, -126689.0625, +295607.8125, -236486.25, +84459.375, -15015.0, +1365.0, -60.0, +1.0, ] ) # fmt: on sqrt_piinv = 1.0 / np.sqrt(np.pi) zz = z * z w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0)) if np.any(z.imag < s): mask = abs(z.real) + z.imag < s # returns true for interior points # returns small complex array covering only the interior region Z = z[np.where(mask)] + 1.35j ZZ = Z * Z # fmt: off # Recursive algorithms for the polynomials in Z with coefficients AA, bb # numer = 0.0 # for A in AA[::-1]: # numer = numer * Z + A # Explicitly unrolled above loop for speed numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z + AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z + AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0]) # denom = 0.0 # for b in bb[::-1]: # denom = denom * ZZ + b # Explicitly unrolled above loop for speed denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ + bb[2])*ZZ + bb[1])*ZZ + bb[0] # fmt: on np.place(w, mask, numer / denom) return w class Const1D(Fittable1DModel): """ One dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const2D Notes ----- Model formula: .. math:: f(x) = A Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Const1D plt.figure() s1 = Const1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter( default=1, description="Value of the constant function", mag=True ) linear = True @staticmethod def evaluate(x, amplitude): """One dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False, subok=True) return x @staticmethod def fit_deriv(x, amplitude): """One dimensional Constant model derivative with respect to parameters""" d_amplitude = np.ones_like(x) return [d_amplitude] @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"amplitude": outputs_unit[self.outputs[0]]} class Const2D(Fittable2DModel): """ Two dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const1D Notes ----- Model formula: .. math:: f(x, y) = A """ amplitude = Parameter( default=1, description="Value of the constant function", mag=True ) linear = True @staticmethod def evaluate(x, y, amplitude): """Two dimensional Constant model function""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False, subok=True) return x @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"amplitude": outputs_unit[self.outputs[0]]} class Ellipse2D(Fittable2DModel): """ A 2D Ellipse model. Parameters ---------- amplitude : float Value of the ellipse. x_0 : float x position of the center of the disk. y_0 : float y position of the center of the disk. a : float The length of the semimajor axis. b : float The length of the semiminor axis. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Disk2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} \\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos \\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 + \\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0) \\cos \\theta}{b}\\right]^2 \\leq 1 \\\\ 0 & : \\mathrm{otherwise} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Ellipse2D from astropy.coordinates import Angle import matplotlib.pyplot as plt import matplotlib.patches as mpatches x0, y0 = 25, 25 a, b = 20, 10 theta = Angle(30, 'deg') e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b, theta=theta.radian) y, x = np.mgrid[0:50, 0:50] fig, ax = plt.subplots(1, 1) ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r') e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red', facecolor='none') ax.add_patch(e2) plt.show() """ amplitude = Parameter(default=1, description="Value of the ellipse", mag=True) x_0 = Parameter(default=0, description="X position of the center of the disk.") y_0 = Parameter(default=0, description="Y position of the center of the disk.") a = Parameter(default=1, description="The length of the semimajor axis") b = Parameter(default=1, description="The length of the semiminor axis") theta = Parameter( default=0.0, description=( "Rotation angle either as a float (in radians) or a |Quantity| angle" ), ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, a, b, theta): """Two dimensional Ellipse model function.""" xx = x - x_0 yy = y - y_0 cost = np.cos(theta) sint = np.sin(theta) numerator1 = (xx * cost) + (yy * sint) numerator2 = -(xx * sint) + (yy * cost) in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0 result = np.select([in_ellipse], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ a = self.a b = self.b theta = self.theta dx, dy = ellipse_extent(a, b, theta) return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "a": inputs_unit[self.inputs[0]], "b": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class Disk2D(Fittable2DModel): """ Two dimensional radial symmetric Disk model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk R_0 : float Radius of the disk See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r \\leq R_0 \\\\ 0 & : r > R_0 \\end{array} \\right. 
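    Examples
    --------
    A minimal usage sketch (assuming plain, unitless parameters); the model
    returns ``amplitude`` at points with :math:`r \\leq R_0` and zero
    elsewhere:

    >>> import numpy as np
    >>> from astropy.modeling.models import Disk2D
    >>> disk = Disk2D(amplitude=5., x_0=0., y_0=0., R_0=2.)
    >>> y, x = np.mgrid[-3:4, -3:4]
    >>> image = disk(x, y)  # 5.0 inside the disk, 0.0 outside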
""" amplitude = Parameter(default=1, description="Value of disk function", mag=True) x_0 = Parameter(default=0, description="X position of center of the disk") y_0 = Parameter(default=0, description="Y position of center of the disk") R_0 = Parameter(default=1, description="Radius of the disk") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0): """Two dimensional Disk model function""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 result = np.select([rr <= R_0**2], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ return ( (self.y_0 - self.R_0, self.y_0 + self.R_0), (self.x_0 - self.R_0, self.x_0 + self.R_0), ) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "R_0": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Ring2D(Fittable2DModel): """ Two dimensional radial symmetric Ring model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk r_in : float Inner radius of the ring width : float Width of the ring. r_out : float Outer Radius of the ring. Can be specified instead of width. See Also -------- Disk2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r_{in} \\leq r \\leq r_{out} \\\\ 0 & : \\text{else} \\end{array} \\right. Where :math:`r_{out} = r_{in} + r_{width}`. 
""" amplitude = Parameter(default=1, description="Value of the disk function", mag=True) x_0 = Parameter(default=0, description="X position of center of disc") y_0 = Parameter(default=0, description="Y position of center of disc") r_in = Parameter(default=1, description="Inner radius of the ring") width = Parameter(default=1, description="Width of the ring") def __init__( self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=None, width=None, r_out=None, **kwargs, ): if (r_in is None) and (r_out is None) and (width is None): r_in = self.r_in.default width = self.width.default elif (r_in is not None) and (r_out is None) and (width is None): width = self.width.default elif (r_in is None) and (r_out is not None) and (width is None): r_in = self.r_in.default width = r_out - r_in elif (r_in is None) and (r_out is None) and (width is not None): r_in = self.r_in.default elif (r_in is not None) and (r_out is not None) and (width is None): width = r_out - r_in elif (r_in is None) and (r_out is not None) and (width is not None): r_in = r_out - width elif (r_in is not None) and (r_out is not None) and (width is not None): if np.any(width != (r_out - r_in)): raise InputParameterError("Width must be r_out - r_in") if np.any(r_in < 0) or np.any(width < 0): raise InputParameterError(f"{r_in=} and {width=} must both be >=0") super().__init__( amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, r_in, width): """Two dimensional Ring model function.""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2) result = np.select([r_range], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.r_in + self.width return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "r_in": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Box1D(Fittable1DModel): """ One dimensional Box model. Parameters ---------- amplitude : float Amplitude A x_0 : float Position of the center of the box function width : float Width of the box See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(x) = \\left \\{ \\begin{array}{ll} A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\ 0 & : \\text{else} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Box1D plt.figure() s1 = Box1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude A", mag=True) x_0 = Parameter(default=0, description="Position of center of box function") width = Parameter(default=1, description="Width of the box") @staticmethod def evaluate(x, amplitude, x_0, width): """One dimensional Box model function""" inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0) return np.select([inside], [amplitude], 0) @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Box2D(Fittable2DModel): """ Two dimensional Box model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the center of the box function x_width : float Width in x direction of the box y_0 : float y position of the center of the box function y_width : float Width in y direction of the box See Also -------- Box1D, Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\ & y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\ 0 : & \\text{else} \\end{array} \\right. """ amplitude = Parameter(default=1, description="Amplitude", mag=True) x_0 = Parameter( default=0, description="X position of the center of the box function" ) y_0 = Parameter( default=0, description="Y position of the center of the box function" ) x_width = Parameter(default=1, description="Width in x direction of the box") y_width = Parameter(default=1, description="Width in y direction of the box") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width): """Two dimensional Box model function""" x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0) y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0) result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dx = self.x_width / 2 dy = self.y_width / 2 return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[1]], "x_width": inputs_unit[self.inputs[0]], "y_width": inputs_unit[self.inputs[1]], "amplitude": outputs_unit[self.outputs[0]], } class Trapezoid1D(Fittable1DModel): """ One dimensional Trapezoid model. 
Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float Center position of the trapezoid width : float Width of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid See Also -------- Box1D, Gaussian1D, Moffat1D Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Trapezoid1D plt.figure() s1 = Trapezoid1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="Center position of the trapezoid") width = Parameter(default=1, description="Width of constant part of the trapezoid") slope = Parameter(default=1, description="Slope of the tails of trapezoid") @staticmethod def evaluate(x, amplitude, x_0, width, slope): """One dimensional Trapezoid model function""" # Compute the four points where the trapezoid changes slope # x1 <= x2 <= x3 <= x4 x2 = x_0 - width / 2.0 x3 = x_0 + width / 2.0 x1 = x2 - amplitude / slope x4 = x3 + amplitude / slope # Compute model values in pieces between the change points range_a = np.logical_and(x >= x1, x < x2) range_b = np.logical_and(x >= x2, x < x3) range_c = np.logical_and(x >= x3, x < x4) val_a = slope * (x - x1) val_b = amplitude val_c = slope * (x4 - x) result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 + self.amplitude / self.slope return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class TrapezoidDisk2D(Fittable2DModel): """ Two dimensional circular Trapezoid model. Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float x position of the center of the trapezoid y_0 : float y position of the center of the trapezoid R_0 : float Radius of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid in x direction. 
See Also -------- Disk2D, Box2D """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="X position of the center of the trapezoid") y_0 = Parameter(default=0, description="Y position of the center of the trapezoid") R_0 = Parameter(default=1, description="Radius of constant part of trapezoid") slope = Parameter( default=1, description="Slope of tails of trapezoid in x direction" ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0, slope): """Two dimensional Trapezoid Disk model function""" r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) range_1 = r <= R_0 range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope) val_1 = amplitude val_2 = amplitude + slope * (R_0 - r) result = np.select([range_1, range_2], [val_1, val_2]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.R_0 + self.amplitude / self.slope return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit["x"] != inputs_unit["y"]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "R_0": inputs_unit[self.inputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class RickerWavelet1D(Fittable1DModel): """ One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float Position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D Notes ----- Model formula: .. math:: f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import RickerWavelet1D plt.figure() s1 = RickerWavelet1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -2, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="Position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, amplitude, x_0, sigma): """One dimensional Ricker Wavelet model function""" xx_ww = (x - x_0) ** 2 / (2 * sigma**2) return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww) def bounding_box(self, factor=10.0): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of sigma used to define the limits. 
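            The default is 10.0, which gives limits of
            ``(x_0 - 10 * sigma, x_0 + 10 * sigma)``.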
""" x0 = self.x_0 dx = factor * self.sigma return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "sigma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class RickerWavelet2D(Fittable2DModel): """ Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the peak y_0 : float y position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet1D, Gaussian2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{\\frac{- \\left(x - x_{0}\\right)^{2} - \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}} """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, sigma): """Two dimensional Ricker Wavelet model function""" rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2) return amplitude * (1 - rr_ww) * np.exp(-rr_ww) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "sigma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class AiryDisk2D(Fittable2DModel): """ Two dimensional Airy disk model. Parameters ---------- amplitude : float Amplitude of the Airy function. x_0 : float x position of the maximum of the Airy function. y_0 : float y position of the maximum of the Airy function. radius : float The radius of the Airy disk (radius of the first zero). See Also -------- Box2D, TrapezoidDisk2D, Gaussian2D Notes ----- Model formula: .. math:: f(r) = A \\left[ \\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}} \\right]^2 Where :math:`J_1` is the first order Bessel function of the first kind, :math:`r` is radial distance from the maximum of the Airy function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R` is the input ``radius`` parameter, and :math:`R_z = 1.2196698912665045`). For an optical system, the radius of the first zero represents the limiting angular resolution and is approximately 1.22 * lambda / D, where lambda is the wavelength of the light and D is the diameter of the aperture. See [1]_ for more details about the Airy disk. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Airy_disk """ amplitude = Parameter( default=1, description="Amplitude (peak value) of the Airy function" ) x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") radius = Parameter( default=1, description="The radius of the Airy disk (radius of first zero crossing)", ) _rz = None _j1 = None @classmethod def evaluate(cls, x, y, amplitude, x_0, y_0, radius): """Two dimensional Airy model function""" if cls._rz is None: from scipy.special import j1, jn_zeros cls._rz = jn_zeros(1, 1)[0] / np.pi cls._j1 = j1 r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz) if isinstance(r, Quantity): # scipy function cannot handle Quantity, so turn into array. r = r.to_value(u.dimensionless_unscaled) # Since r can be zero, we have to take care to treat that case # separately so as not to raise a numpy warning z = np.ones(r.shape) rt = np.pi * r[r > 0] z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2 if isinstance(amplitude, Quantity): # make z quantity too, otherwise in-place multiplication fails. z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True) z *= amplitude return z @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "radius": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Moffat1D(Fittable1DModel): """ One dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian1D, Box1D Notes ----- Model formula: .. math:: f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Moffat1D plt.figure() s1 = Moffat1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the model") x_0 = Parameter(default=0, description="X position of maximum of Moffat model") gamma = Parameter(default=1, description="Core width of Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. 
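        The resulting expression, as implemented below, is

        .. math:: \\mathrm{FWHM} = 2 \\,|\\gamma| \\sqrt{2^{1/\\alpha} - 1}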
""" return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model function""" return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model derivative with respect to parameters""" fac = 1 + (x - x_0) ** 2 / gamma**2 d_A = fac ** (-alpha) d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2) d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3) d_alpha = -amplitude * d_A * np.log(fac) return [d_A, d_x_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "gamma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Moffat2D(Fittable2DModel): """ Two dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. y_0 : float y position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the model") x_0 = Parameter( default=0, description="X position of the maximum of the Moffat model" ) y_0 = Parameter( default=0, description="Y position of the maximum of the Moffat model" ) gamma = Parameter(default=1, description="Core width of the Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. """ return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model function""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2 return amplitude * (1 + rr_gg) ** (-alpha) @staticmethod def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model derivative with respect to parameters""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2 d_A = (1 + rr_gg) ** (-alpha) d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg)) d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg)) d_alpha = -amplitude * d_A * np.log(1 + rr_gg) d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg)) return [d_A, d_x_0, d_y_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None else: return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "gamma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Sersic2D(Fittable2DModel): r""" Two dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. x_0 : float, optional x position of the center. y_0 : float, optional y position of the center. ellip : float, optional Ellipticity. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: I(x,y) = I(r) = I_e\exp\left\{ -b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right] \right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (2n,b_n) Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic2D import matplotlib.pyplot as plt x,y = np.meshgrid(np.arange(100), np.arange(100)) mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50, ellip=.5, theta=-1) img = mod(x, y) log_img = np.log10(img) plt.figure() plt.imshow(log_img, origin='lower', interpolation='nearest', vmin=-1, vmax=2) plt.xlabel('x') plt.ylabel('y') cbar = plt.colorbar() cbar.set_label('Log Brightness', rotation=270, labelpad=25) cbar.set_ticks([-1, 0, 1, 2], update_ticks=True) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") x_0 = Parameter(default=0, description="X position of the center") y_0 = Parameter(default=0, description="Y position of the center") ellip = Parameter(default=0, description="Ellipticity") theta = Parameter( default=0.0, description=( "Rotation angle either as a float (in radians) or a |Quantity| angle" ), ) _gammaincinv = None @classmethod def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta): """Two dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv bn = cls._gammaincinv(2.0 * n, 0.5) a, b = r_eff, (1 - ellip) * r_eff cos_theta, sin_theta = np.cos(theta), np.sin(theta) x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2) return amplitude * np.exp(-bn * (z ** (1 / n) - 1)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "r_eff": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class KingProjectedAnalytic1D(Fittable1DModel): """ Projected (surface density) analytic King Model. Parameters ---------- amplitude : float Amplitude or scaling factor. r_core : float Core radius (f(r_c) ~ 0.5 f_0) r_tide : float Tidal radius. Notes ----- This model approximates a King model with an analytic function. The derivation of this equation can be found in King '62 (equation 14). This is just an approximation of the full model and the parameters derived from this model should be taken with caution. It usually works for models with a concentration (c = log10(r_t/r_c) parameter < 2. Model formula: .. math:: f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} - \\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2 Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import KingProjectedAnalytic1D import matplotlib.pyplot as plt plt.figure() rt_list = [1, 2, 5, 10, 20] for rt in rt_list: r = np.linspace(0.1, rt, 100) mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt) sig = mod(r) plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}") plt.xlabel("r") plt.ylabel(r"$\\sigma/\\sigma_0$") plt.legend() plt.show() References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K """ amplitude = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor", ) r_core = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius" ) r_tide = Parameter( default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius" ) @property def concentration(self): """Concentration parameter of the king model""" return np.log10(np.abs(self.r_tide / self.r_core)) @staticmethod def evaluate(x, amplitude, r_core, r_tide): """ Analytic King model function. """ result = ( amplitude * r_core**2 * ( 1 / np.sqrt(x**2 + r_core**2) - 1 / np.sqrt(r_tide**2 + r_core**2) ) ** 2 ) # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) result[bounds] = result[bounds] * 0.0 return result @staticmethod def fit_deriv(x, amplitude, r_core, r_tide): """ Analytic King model function derivatives. """ d_amplitude = ( r_core**2 * ( 1 / np.sqrt(x**2 + r_core**2) - 1 / np.sqrt(r_tide**2 + r_core**2) ) ** 2 ) d_r_core = ( 2 * amplitude * r_core**2 * ( r_core / (r_core**2 + r_tide**2) ** (3 / 2) - r_core / (r_core**2 + x**2) ** (3 / 2) ) * ( 1.0 / np.sqrt(r_core**2 + x**2) - 1.0 / np.sqrt(r_core**2 + r_tide**2) ) + 2 * amplitude * r_core * ( 1.0 / np.sqrt(r_core**2 + x**2) - 1.0 / np.sqrt(r_core**2 + r_tide**2) ) ** 2 ) d_r_tide = ( 2 * amplitude * r_core**2 * r_tide * ( 1.0 / np.sqrt(r_core**2 + x**2) - 1.0 / np.sqrt(r_core**2 + r_tide**2) ) ) / (r_core**2 + r_tide**2) ** (3 / 2) # Set invalid r values to 0 bounds = (x >= r_tide) | (x < 0) d_amplitude[bounds] = d_amplitude[bounds] * 0 d_r_core[bounds] = d_r_core[bounds] * 0 d_r_tide[bounds] = d_r_tide[bounds] * 0 return [d_amplitude, d_r_core, d_r_tide] @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. The model is not defined for r > r_tide. 
``(r_low, r_high)`` """ return (0 * self.r_tide, 1 * self.r_tide) @property def input_units(self): if self.r_core.unit is None: return None return {self.inputs[0]: self.r_core.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "r_core": inputs_unit[self.inputs[0]], "r_tide": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Logarithmic1D(Fittable1DModel): """ One dimensional logarithmic model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Exponential1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.log(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): d_amplitude = np.log(x / tau) d_tau = np.zeros(x.shape) - (amplitude / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Exponential1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "tau": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Exponential1D(Fittable1DModel): """ One dimensional exponential model. Parameters ---------- amplitude : float, optional tau : float, optional See Also -------- Logarithmic1D, Gaussian1D """ amplitude = Parameter(default=1) tau = Parameter(default=1) @staticmethod def evaluate(x, amplitude, tau): return amplitude * np.exp(x / tau) @staticmethod def fit_deriv(x, amplitude, tau): """Derivative with respect to parameters""" d_amplitude = np.exp(x / tau) d_tau = -amplitude * (x / tau**2) * np.exp(x / tau) return [d_amplitude, d_tau] @property def inverse(self): new_amplitude = self.tau new_tau = self.amplitude return Logarithmic1D(amplitude=new_amplitude, tau=new_tau) @tau.validator def tau(self, val): """tau cannot be 0""" if np.all(val == 0): raise ValueError("0 is not an allowed value for tau") @property def input_units(self): if self.tau.unit is None: return None return {self.inputs[0]: self.tau.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "tau": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], }
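

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of this module's public code):
# a minimal, self-contained example of evaluating and fitting the 1D models
# defined above.  The variable names are arbitrary; only public
# ``astropy.modeling`` entry points are used.
if __name__ == "__main__":
    import numpy as np

    from astropy.modeling import fitting, models

    # Evaluate a Box1D profile on a grid.
    box = models.Box1D(amplitude=2.0, x_0=0.0, width=3.0)
    x = np.linspace(-5, 5, 201)
    y_box = box(x)

    # Fit a Moffat1D profile to noisy samples drawn from a known model.
    rng = np.random.default_rng(42)
    truth = models.Moffat1D(amplitude=10.0, x_0=0.5, gamma=1.2, alpha=2.5)
    y_obs = truth(x) + rng.normal(scale=0.1, size=x.size)

    fitter = fitting.LevMarLSQFitter()
    fit = fitter(models.Moffat1D(amplitude=5.0, x_0=0.0, gamma=1.0, alpha=1.0), x, y_obs)
    print(fit)  # best-fit parameters should be close to the "truth" values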
c4f573db1d98c9a6b340dc40850bc8cfb9998dea93d558817cbc8a29ef5fd9a2
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.

All angles are set and displayed in degrees but internally the computations
are performed in radians. All functions expect inputs and outputs in degrees.

References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""

import abc
from itertools import chain, product

import numpy as np

from astropy import units as u
from astropy import wcs

from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian

# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
    ("ZenithalPerspective", "AZP"),
    ("SlantZenithalPerspective", "SZP"),
    ("Gnomonic", "TAN"),
    ("Stereographic", "STG"),
    ("SlantOrthographic", "SIN"),
    ("ZenithalEquidistant", "ARC"),
    ("ZenithalEqualArea", "ZEA"),
    ("Airy", "AIR"),
    ("CylindricalPerspective", "CYP"),
    ("CylindricalEqualArea", "CEA"),
    ("PlateCarree", "CAR"),
    ("Mercator", "MER"),
    ("SansonFlamsteed", "SFL"),
    ("Parabolic", "PAR"),
    ("Molleweide", "MOL"),
    ("HammerAitoff", "AIT"),
    ("ConicPerspective", "COP"),
    ("ConicEqualArea", "COE"),
    ("ConicEquidistant", "COD"),
    ("ConicOrthomorphic", "COO"),
    ("BonneEqualArea", "BON"),
    ("Polyconic", "PCO"),
    ("TangentialSphericalCube", "TSC"),
    ("COBEQuadSphericalCube", "CSC"),
    ("QuadSphericalCube", "QSC"),
    ("HEALPix", "HPX"),
    ("HEALPixPolar", "XPH"),
]

_NOT_SUPPORTED_PROJ_CODES = ["ZPN"]

_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)

projcodes = [code for _, code in _PROJ_NAME_CODE]

__all__ = [
    "Projection",
    "Pix2SkyProjection",
    "Sky2PixProjection",
    "Zenithal",
    "Cylindrical",
    "PseudoCylindrical",
    "Conic",
    "PseudoConic",
    "QuadCube",
    "HEALPix",
    "AffineTransformation2D",
    "projcodes",
] + list(map("_".join, product(["Pix2Sky", "Sky2Pix"], chain(*_PROJ_NAME_CODE))))


class _ParameterDS(Parameter):
    """
    Same as `Parameter` but can indicate its modified status via the ``dirty``
    property. This flag also gets set automatically when a parameter is
    modified.

    This ability to track a parameter's modified status is needed so that
    WCSLIB's prjprm structure (whose update may be a more time-intensive
    operation) is refreshed *only as required*.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dirty = True

    def validate(self, value):
        super().validate(value)
        self.dirty = True


class Projection(Model):
    """Base class for all sky projections."""

    # Radius of the generating sphere.
    # This sets the circumference to 360 deg so that arc length is measured in deg.
    r0 = 180 * u.deg / np.pi

    _separable = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._prj = wcs.Prjprm()

    @property
    @abc.abstractmethod
    def inverse(self):
        """
        Inverse projection--all projection models must provide an inverse.
        """

    @property
    def prjprm(self):
        """WCSLIB ``prjprm`` structure."""
        self._update_prj()
        return self._prj

    def _update_prj(self):
        """
        A default updater for projection's pv.

        .. warning::
            This method assumes that PV0 is never modified. If a projection
            that uses PV0 is ever implemented in this module, that projection
            class should override this method.

        .. warning::
            This method assumes that the order in which PVi values (i>0)
            are to be assigned is identical to the order of model parameters
            in ``param_names``. That is, pv[1] = model.parameters[0], ...
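            For example, for the ``AZP`` projections defined below,
            ``pv[1]`` holds ``mu`` and ``pv[2]`` holds ``gamma``.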
""" if not self.param_names: return pv = [] dirty = False for p in self.param_names: param = getattr(self, p) pv.append(float(param.value)) dirty |= param.dirty param.dirty = False if dirty: self._prj.pv = None, *pv self._prj.set() class Pix2SkyProjection(Projection): """Base class for all Pix2Sky projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split("_")[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super().__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # with no parameters: self._prj.set() self.inputs = ("x", "y") self.outputs = ("phi", "theta") @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, x, y, *args, **kwargs): self._update_prj() return self._prj.prjx2s(x, y) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Sky2PixProjection(Projection): """Base class for all Sky2Pix projections.""" n_inputs = 2 n_outputs = 2 _input_units_strict = True _input_units_allow_dimensionless = True def __new__(cls, *args, **kwargs): long_name = cls.name.split("_")[1] cls.prj_code = _PROJ_NAME_CODE_MAP[long_name] return super().__new__(cls) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._prj.code = self.prj_code self._update_prj() if not self.param_names: # force initial call to Prjprm.set() for projections # without parameters: self._prj.set() self.inputs = ("phi", "theta") self.outputs = ("x", "y") @property def input_units(self): return {self.inputs[0]: u.deg, self.inputs[1]: u.deg} @property def return_units(self): return {self.outputs[0]: u.deg, self.outputs[1]: u.deg} def evaluate(self, phi, theta, *args, **kwargs): self._update_prj() return self._prj.prjs2x(phi, theta) @property def inverse(self): pv = [getattr(self, param).value for param in self.param_names] return self._inv_cls(*pv) class Zenithal(Projection): r"""Base class for all Zenithal projections. Zenithal (or azimuthal) projections map the sphere directly onto a plane. All zenithal projections are specified by defining the radius as a function of native latitude, :math:`R_\theta`. The pixel-to-sky transformation is defined as: .. math:: \phi &= \arg(-y, x) \\ R_\theta &= \sqrt{x^2 + y^2} and the inverse (sky-to-pixel) is defined as: .. math:: x &= R_\theta \sin \phi \\ y &= R_\theta \cos \phi """ class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Zenithal perspective projection - pixel to sky. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: \phi &= \arg(-y \cos \gamma, x) \\ \theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right. where: .. math:: \psi &= \arg(\rho, 1) \\ \omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\ \rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\ R &= \sqrt{x^2 + y^2 \cos^2 \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. 
""" mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default = 0°)", ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1" ) class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``AZP`` projection in FITS WCS. .. math:: x &= R \sin \phi \\ y &= -R \sec \gamma \cos \theta where: .. math:: R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta} {(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma} Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. gamma : float Look angle γ in degrees. Default is 0°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) gamma = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Look angle γ in degrees (Default=0°)", ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1" ) class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal): r""" Slant zenithal perspective projection - pixel to sky. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float Distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees (Default=0°)", ) theta0 = _ParameterDS( default=90.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees (Default=0°)", ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1" ) class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal): r""" Zenithal perspective projection - sky to pixel. Corresponds to the ``SZP`` projection in FITS WCS. Parameters ---------- mu : float distance from point of projection to center of sphere in spherical radii, μ. Default is 0. phi0 : float The longitude φ₀ of the reference point, in degrees. Default is 0°. theta0 : float The latitude θ₀ of the reference point, in degrees. Default is 90°. """ mu = _ParameterDS( default=0.0, description="Distance from point of projection to center of sphere" ) phi0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The longitude φ₀ of the reference point in degrees", ) theta0 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="The latitude θ₀ of the reference point, in degrees", ) @mu.validator def mu(self, value): if np.any(np.equal(value, -1.0)): raise InputParameterError( "Zenithal perspective projection is not defined for mu = -1" ) class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal): r""" Gnomonic projection - pixel to sky. Corresponds to the ``TAN`` projection in FITS WCS. 
    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
    """


class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
    r"""
    Gnomonic Projection - sky to pixel.

    Corresponds to the ``TAN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
    """


class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
    r"""
    Stereographic Projection - pixel to sky.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
    """


class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
    r"""
    Stereographic Projection - sky to pixel.

    Corresponds to the ``STG`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
    """


class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
    r"""
    Slant orthographic projection - pixel to sky.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        \theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)

    The parameters :math:`\xi` and :math:`\eta` are defined from the
    reference point :math:`(\phi_c, \theta_c)` as:

    .. math::
        \xi &= \cot \theta_c \sin \phi_c \\
        \eta &= - \cot \theta_c \cos \phi_c

    Parameters
    ----------
    xi : float
        Obliqueness parameter, ξ. Default is 0.0.

    eta : float
        Obliqueness parameter, η. Default is 0.0.
    """

    xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
    eta = _ParameterDS(default=0.0, description="Obliqueness parameter")


class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
    r"""
    Slant orthographic projection - sky to pixel.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cos \theta

    More specifically, the full transformation is:

    .. math::
        x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
        y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
    """

    xi = _ParameterDS(default=0.0)
    eta = _ParameterDS(default=0.0)


class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equidistant projection - pixel to sky.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - R_\theta
    """


class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equidistant projection - sky to pixel.

    Corresponds to the ``ARC`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = 90^\circ - \theta
    """


class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
    r"""
    Zenithal equal area projection - pixel to sky.

    Corresponds to the ``ZEA`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        \theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
    """


class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
    r"""
    Zenithal equal area projection - sky to pixel.

    Corresponds to the ``ZEA`` projection in FITS WCS.
    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
                 &= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
    """


class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
    r"""
    Airy projection - pixel to sky.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(default=90.0)


class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
    r"""
    Airy - sky to pixel.

    Corresponds to the ``AIR`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
            \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)

    where:

    .. math::
        \xi &= \frac{90^\circ - \theta}{2} \\
        \xi_b &= \frac{90^\circ - \theta_b}{2}

    Parameters
    ----------
    theta_b : float
        The latitude :math:`\theta_b` at which to minimize the error,
        in degrees. Default is 90°.
    """

    theta_b = _ParameterDS(
        default=90.0,
        description="The latitude at which to minimize the error, in degrees",
    )


class Cylindrical(Projection):
    r"""Base class for Cylindrical projections.

    Cylindrical projections are so-named because the surface of
    projection is a cylinder.
    """

    _separable = True


class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical perspective - pixel to sky.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        \phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)

    where:

    .. math::
        \eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.

    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(default=1.0)
    lam = _ParameterDS(default=1.0)

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError("CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError("CYP projection is not defined for lambda = -mu")


class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
    r"""
    Cylindrical Perspective - sky to pixel.

    Corresponds to the ``CYP`` projection in FITS WCS.

    .. math::
        x &= \lambda \phi \\
        y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta

    Parameters
    ----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.

    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
    """

    mu = _ParameterDS(
        default=1.0, description="Distance from center of sphere in spherical radii"
    )
    lam = _ParameterDS(
        default=1.0, description="Radius of the cylinder in spherical radii"
    )

    @mu.validator
    def mu(self, value):
        if np.any(value == -self.lam):
            raise InputParameterError("CYP projection is not defined for mu = -lambda")

    @lam.validator
    def lam(self, value):
        if np.any(value == -self.mu):
            raise InputParameterError("CYP projection is not defined for lambda = -mu")


class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
    r"""
    Cylindrical equal area projection - pixel to sky.

    Corresponds to the ``CEA`` projection in FITS WCS.

    ..
math:: \phi &= x \\ \theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right) Parameters ---------- lam : float Radius of the cylinder in spherical radii, λ. Default is 1. """ lam = _ParameterDS(default=1) class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical): r""" Cylindrical equal area projection - sky to pixel. Corresponds to the ``CEA`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda} Parameters ---------- lam : float Radius of the cylinder in spherical radii, λ. Default is 0. """ lam = _ParameterDS(default=1) class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical): r""" Plate carrée projection - pixel to sky. Corresponds to the ``CAR`` projection in FITS WCS. .. math:: \phi &= x \\ \theta &= y """ @staticmethod def evaluate(x, y): # The intermediate variables are only used here for clarity phi = np.array(x) theta = np.array(y) return phi, theta class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical): r""" Plate carrée projection - sky to pixel. Corresponds to the ``CAR`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \theta """ @staticmethod def evaluate(phi, theta): # The intermediate variables are only used here for clarity x = np.array(phi) y = np.array(theta) return x, y class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical): r""" Mercator - pixel to sky. Corresponds to the ``MER`` projection in FITS WCS. .. math:: \phi &= x \\ \theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ} """ class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical): r""" Mercator - sky to pixel. Corresponds to the ``MER`` projection in FITS WCS. .. math:: x &= \phi \\ y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right) """ class PseudoCylindrical(Projection): r"""Base class for pseudocylindrical projections. Pseudocylindrical projections are like cylindrical projections except the parallels of latitude are projected at diminishing lengths toward the polar regions in order to reduce lateral distortion there. Consequently, the meridians are curved. """ _separable = True class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical): r""" Sanson-Flamsteed projection - pixel to sky. Corresponds to the ``SFL`` projection in FITS WCS. .. math:: \phi &= \frac{x}{\cos y} \\ \theta &= y """ class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical): r""" Sanson-Flamsteed projection - sky to pixel. Corresponds to the ``SFL`` projection in FITS WCS. .. math:: x &= \phi \cos \theta \\ y &= \theta """ class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical): r""" Parabolic projection - pixel to sky. Corresponds to the ``PAR`` projection in FITS WCS. .. math:: \phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\ \theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right) """ class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical): r""" Parabolic projection - sky to pixel. Corresponds to the ``PAR`` projection in FITS WCS. .. math:: x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\ y &= 180^\circ \sin \frac{\theta}{3} """ class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical): r""" Molleweide's projection - pixel to sky. Corresponds to the ``MOL`` projection in FITS WCS. .. 
math:: \phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\ \theta &= \sin^{-1}\left( \frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right) + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2} \right) """ class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical): r""" Molleweide's projection - sky to pixel. Corresponds to the ``MOL`` projection in FITS WCS. .. math:: x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\ y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma where :math:`\gamma` is defined as the solution of the transcendental equation: .. math:: \sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi} """ class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical): r""" Hammer-Aitoff projection - pixel to sky. Corresponds to the ``AIT`` projection in FITS WCS. .. math:: \phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\ \theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right) """ class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical): r""" Hammer-Aitoff projection - sky to pixel. Corresponds to the ``AIT`` projection in FITS WCS. .. math:: x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\ y &= \gamma \sin \theta where: .. math:: \gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}} """ class Conic(Projection): r"""Base class for conic projections. In conic projections, the sphere is thought to be projected onto the surface of a cone which is then opened out. In a general sense, the pixel-to-sky transformation is defined as: .. math:: \phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\ R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2} and the inverse (sky-to-pixel) is defined as: .. math:: x &= R_\theta \sin (C \phi) \\ y &= R_\theta \cos (C \phi) + Y_0 where :math:`C` is the "constant of the cone": .. math:: C = \frac{180^\circ \cos \theta}{\pi R_\theta} """ sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian) delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian) class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic): r""" Colles' conic perspective projection - pixel to sky. Corresponds to the ``COP`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \sin \theta_a \\ R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\ Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic): r""" Colles' conic perspective projection - sky to pixel. Corresponds to the ``COP`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \sin \theta_a \\ R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\ Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. 
delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic): r""" Alber's conic equal area projection - pixel to sky. Corresponds to the ``COE`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \gamma / 2 \\ R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\ Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)} where: .. math:: \gamma = \sin \theta_1 + \sin \theta_2 Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic): r""" Alber's conic equal area projection - sky to pixel. Corresponds to the ``COE`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \gamma / 2 \\ R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\ Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)} where: .. math:: \gamma = \sin \theta_1 + \sin \theta_2 Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic): r""" Conic equidistant projection - pixel to sky. Corresponds to the ``COD`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\ R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\ Y_0 = \eta\cot\eta\cot\theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic): r""" Conic equidistant projection - sky to pixel. Corresponds to the ``COD`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\ R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\ Y_0 = \eta\cot\eta\cot\theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. 
""" class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic): r""" Conic orthomorphic projection - pixel to sky. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic): r""" Conic orthomorphic projection - sky to pixel. Corresponds to the ``COO`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)} {\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)} {\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\ R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\ Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C where: .. math:: \psi = \frac{180^\circ}{\pi} \frac{\cos \theta} {C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C} Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """ class PseudoConic(Projection): r"""Base class for pseudoconic projections. Pseudoconics are a subclass of conics with concentric parallels. """ class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - pixel to sky. Corresponds to the ``BON`` projection in FITS WCS. .. math:: \phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\ \theta &= Y_0 - R_\theta where: .. math:: R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\ A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. """ _separable = True theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian) class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic): r""" Bonne's equal area pseudoconic projection - sky to pixel. Corresponds to the ``BON`` projection in FITS WCS. .. math:: x &= R_\theta \sin A_\phi \\ y &= -R_\theta \cos A_\phi + Y_0 where: .. math:: A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\ R_\theta &= Y_0 - \theta \\ Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1 Parameters ---------- theta1 : float Bonne conformal latitude, in degrees. 
""" _separable = True theta1 = _ParameterDS( default=0.0, getter=_to_orig_unit, setter=_to_radian, description="Bonne conformal latitude, in degrees", ) class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic): r""" Polyconic projection - pixel to sky. Corresponds to the ``PCO`` projection in FITS WCS. """ class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic): r""" Polyconic projection - sky to pixel. Corresponds to the ``PCO`` projection in FITS WCS. """ class QuadCube(Projection): r"""Base class for quad cube projections. Quadrilateralized spherical cube (quad-cube) projections belong to the class of polyhedral projections in which the sphere is projected onto the surface of an enclosing polyhedron. The six faces of the quad-cube projections are numbered and laid out as:: 0 4 3 2 1 4 3 2 5 """ class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube): r""" Tangential spherical cube projection - pixel to sky. Corresponds to the ``TSC`` projection in FITS WCS. """ class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube): r""" Tangential spherical cube projection - sky to pixel. Corresponds to the ``TSC`` projection in FITS WCS. """ class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``CSC`` projection in FITS WCS. """ class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube): r""" COBE quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``CSC`` projection in FITS WCS. """ class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube): r""" Quadrilateralized spherical cube projection - pixel to sky. Corresponds to the ``QSC`` projection in FITS WCS. """ class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube): r""" Quadrilateralized spherical cube projection - sky to pixel. Corresponds to the ``QSC`` projection in FITS WCS. """ class HEALPix(Projection): r"""Base class for HEALPix projections.""" class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix): r""" HEALPix - pixel to sky. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS( default=4.0, description="The number of facets in longitude direction." ) X = _ParameterDS( default=3.0, description="The number of facets in latitude direction." ) class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix): r""" HEALPix projection - sky to pixel. Corresponds to the ``HPX`` projection in FITS WCS. Parameters ---------- H : float The number of facets in longitude direction. X : float The number of facets in latitude direction. """ _separable = True H = _ParameterDS( default=4.0, description="The number of facets in longitude direction." ) X = _ParameterDS( default=3.0, description="The number of facets in latitude direction." ) class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix): r""" HEALPix polar, aka "butterfly" projection - pixel to sky. Corresponds to the ``XPH`` projection in FITS WCS. """ class AffineTransformation2D(Model): """ Perform an affine transformation in 2 dimensions. 
    Parameters
    ----------
    matrix : array
        A 2x2 matrix specifying the linear transformation to apply to the
        inputs.
    translation : array
        A 2D vector (given as either a 2x1 or 1x2 array) specifying a
        translation to apply to the inputs.
    """

    n_inputs = 2
    n_outputs = 2

    standard_broadcasting = False

    _separable = False

    matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
    translation = Parameter(default=[0.0, 0.0])

    @matrix.validator
    def matrix(self, value):
        """Validates that the input matrix is a 2x2 2D array."""
        if np.shape(value) != (2, 2):
            raise InputParameterError(
                "Expected transformation matrix to be a 2x2 array"
            )

    @translation.validator
    def translation(self, value):
        """
        Validates that the translation vector is a 2D vector.  This allows
        either a "row" vector or a "column" vector where in the latter case
        the resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
        """
        if not (
            (np.ndim(value) == 1 and np.shape(value) == (2,))
            or (np.ndim(value) == 2 and np.shape(value) == (1, 2))
        ):
            raise InputParameterError(
                "Expected translation vector to be a 2 element row or column "
                "vector array"
            )

    def __init__(self, matrix=matrix, translation=translation, **kwargs):
        super().__init__(matrix=matrix, translation=translation, **kwargs)
        self.inputs = ("x", "y")
        self.outputs = ("x", "y")

    @property
    def inverse(self):
        """
        Inverse transformation.

        Raises `~astropy.modeling.InputParameterError` if the transformation
        cannot be inverted.
        """
        det = np.linalg.det(self.matrix.value)

        if det == 0:
            raise InputParameterError(
                f"Transformation matrix is singular; {self.__class__.__name__} model"
                " does not have an inverse"
            )

        matrix = np.linalg.inv(self.matrix.value)
        if self.matrix.unit is not None:
            matrix = matrix * self.matrix.unit
        # If matrix has unit then translation has unit, so no need to assign it.
        translation = -np.dot(matrix, self.translation.value)
        return self.__class__(matrix=matrix, translation=translation)

    @classmethod
    def evaluate(cls, x, y, matrix, translation):
        """
        Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y
        coordinates--or a single coordinate pair.

        Parameters
        ----------
        x, y : array, float
            x and y coordinates
        """
        if x.shape != y.shape:
            raise ValueError("Expected input arrays to have the same shape")

        shape = x.shape or (1,)
        # Use asarray to ensure we lose any units on the inputs.
        inarr = np.vstack(
            [np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]
        )

        if inarr.shape[0] != 3 or inarr.ndim != 2:
            raise ValueError("Incompatible input shapes")

        augmented_matrix = cls._create_augmented_matrix(matrix, translation)
        result = np.dot(augmented_matrix, inarr)
        x, y = result[0], result[1]
        x.shape = y.shape = shape

        return x, y

    @staticmethod
    def _create_augmented_matrix(matrix, translation):
        unit = None
        if any([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
            if not all([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
                raise ValueError(
                    "To use AffineTransformation with quantities, "
                    "both matrix and translation need to be quantities."
) unit = translation.unit # matrix should have the same units as translation if not (matrix.unit / translation.unit) == u.dimensionless_unscaled: raise ValueError("matrix and translation must have the same units.") augmented_matrix = np.empty((3, 3), dtype=float) augmented_matrix[0:2, 0:2] = matrix augmented_matrix[0:2, 2:].flat = translation augmented_matrix[2] = [0, 0, 1] if unit is not None: return augmented_matrix * unit return augmented_matrix @property def input_units(self): if self.translation.unit is None and self.matrix.unit is None: return None elif self.translation.unit is not None: return dict(zip(self.inputs, [self.translation.unit] * 2)) else: return dict(zip(self.inputs, [self.matrix.unit] * 2)) for long_name, short_name in _PROJ_NAME_CODE: # define short-name projection equivalent classes: globals()["Pix2Sky_" + short_name] = globals()["Pix2Sky_" + long_name] globals()["Sky2Pix_" + short_name] = globals()["Sky2Pix_" + long_name] # set inverse classes: globals()["Pix2Sky_" + long_name]._inv_cls = globals()["Sky2Pix_" + long_name] globals()["Sky2Pix_" + long_name]._inv_cls = globals()["Pix2Sky_" + long_name]
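

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library code above): a minimal
# demonstration of the ``AffineTransformation2D`` model defined in this
# module, showing that composing the model with its ``inverse`` recovers the
# original coordinates.  The matrix and translation values are arbitrary
# assumptions chosen only for this example.
if __name__ == "__main__":
    import numpy as np

    from astropy.modeling.models import AffineTransformation2D

    # A shear combined with a translation; det(matrix) != 0, so an inverse exists.
    aff = AffineTransformation2D(
        matrix=[[1.0, 0.5], [0.0, 1.0]], translation=[10.0, -5.0]
    )

    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 1.0, 2.0])
    xt, yt = aff(x, y)

    # The ``inverse`` property returns another AffineTransformation2D that
    # undoes the forward transform (up to floating point error).
    xr, yr = aff.inverse(xt, yt)
    assert np.allclose(xr, x) and np.allclose(yr, y)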
7a1a4eb56fb404a006615ff7f5bf8f334a723fa4d3a32893e0652f7b1c9b9a5d
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains models representing polynomials and polynomial series. """ # pylint: disable=invalid-name from math import comb import numpy as np from astropy.utils import check_broadcast, indent from .core import FittableModel, Model from .functional_models import Shift from .parameters import Parameter from .utils import _validate_domain_window, poly_map_domain __all__ = [ "Chebyshev1D", "Chebyshev2D", "Hermite1D", "Hermite2D", "InverseSIP", "Legendre1D", "Legendre2D", "Polynomial1D", "Polynomial2D", "SIP", "OrthoPolynomialBase", "PolynomialModel", ] class PolynomialBase(FittableModel): """ Base class for all polynomial-like models with an arbitrary number of parameters in the form of coefficients. In this case Parameter instances are returned through the class's ``__getattr__`` rather than through class descriptors. """ # Default _param_names list; this will be filled in by the implementation's # __init__ _param_names = () linear = True col_fit_deriv = False @property def param_names(self): """Coefficient names generated based on the model's polynomial degree and number of dimensions. Subclasses should implement this to return parameter names in the desired format. On most `Model` classes this is a class attribute, but for polynomial models it is an instance attribute since each polynomial model instance can have different parameters depending on the degree of the polynomial and the number of dimensions, for example. """ return self._param_names class PolynomialModel(PolynomialBase): """ Base class for polynomial models. Its main purpose is to determine how many coefficients are needed based on the polynomial order and dimension and to provide their default values, names and ordering. """ def __init__( self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params ): self._degree = degree self._order = self.get_num_coeff(self.n_inputs) self._param_names = self._generate_coeff_names(self.n_inputs) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) @property def degree(self): """Degree of polynomial.""" return self._degree def get_num_coeff(self, ndim): """ Return the number of coefficients in one parameter set """ if self.degree < 0: raise ValueError("Degree of polynomial must be positive or null") # deg+1 is used to account for the difference between iraf using # degree and numpy using exact degree if ndim != 1: nmixed = comb(self.degree, ndim) else: nmixed = 0 numc = self.degree * ndim + nmixed + 1 return numc def _invlex(self): c = [] lencoeff = self.degree + 1 for i in range(lencoeff): for j in range(lencoeff): if i + j <= self.degree: c.append((j, i)) return c[::-1] def _generate_coeff_names(self, ndim): names = [] if ndim == 1: for n in range(self._order): names.append(f"c{n}") else: for i in range(self.degree + 1): names.append(f"c{i}_{0}") for i in range(1, self.degree + 1): names.append(f"c{0}_{i}") for i in range(1, self.degree): for j in range(1, self.degree): if i + j < self.degree + 1: names.append(f"c{i}_{j}") return tuple(names) class _PolyDomainWindow1D(PolynomialModel): """ This class sets ``domain`` and ``window`` of 1D polynomials. 
""" def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, n_models, model_set_axis, name=name, meta=meta, **params ) self._set_default_domain_window(domain, window) @property def window(self): return self._window @window.setter def window(self, val): self._window = _validate_domain_window(val) @property def domain(self): return self._domain @domain.setter def domain(self, val): self._domain = _validate_domain_window(val) def _set_default_domain_window(self, domain, window): """ This method sets the ``domain`` and ``window`` attributes on 1D subclasses. """ self._default_domain_window = {"domain": None, "window": (-1, 1)} self.window = window or (-1, 1) self.domain = domain def __repr__(self): return self._format_repr( [self.degree], kwargs={"domain": self.domain, "window": self.window}, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [("Degree", self.degree), ("Domain", self.domain), ("Window", self.window)], self._default_domain_window, ) class OrthoPolynomialBase(PolynomialBase): """ This is a base class for the 2D Chebyshev and Legendre models. The polynomials implemented here require a maximum degree in x and y. For explanation of ``x_domain``, ``y_domain``, ```x_window`` and ```y_window`` see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`. Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable x_window : tuple or None, optional range of the x independent variable y_domain : tuple or None, optional domain of the y independent variable y_window : tuple or None, optional range of the y independent variable **params : dict {keyword: value} pairs, representing {parameter_name: value} """ n_inputs = 2 n_outputs = 1 def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): self.x_degree = x_degree self.y_degree = y_degree self._order = self.get_num_coeff() # Set the ``x/y_domain`` and ``x/y_wndow`` attributes in subclasses. 
self._default_domain_window = { "x_window": (-1, 1), "y_window": (-1, 1), "x_domain": None, "y_domain": None, } self.x_window = x_window or self._default_domain_window["x_window"] self.y_window = y_window or self._default_domain_window["y_window"] self.x_domain = x_domain self.y_domain = y_domain self._param_names = self._generate_coeff_names() if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) def __repr__(self): return self._format_repr( [self.x_degree, self.y_degree], kwargs={ "x_domain": self.x_domain, "y_domain": self.y_domain, "x_window": self.x_window, "y_window": self.y_window, }, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [ ("X_Degree", self.x_degree), ("Y_Degree", self.y_degree), ("X_Domain", self.x_domain), ("Y_Domain", self.y_domain), ("X_Window", self.x_window), ("Y_Window", self.y_window), ], self._default_domain_window, ) def get_num_coeff(self): """ Determine how many coefficients are needed Returns ------- numc : int number of coefficients """ if self.x_degree < 0 or self.y_degree < 0: raise ValueError("Degree of polynomial must be positive or null") return (self.x_degree + 1) * (self.y_degree + 1) def _invlex(self): # TODO: This is a very slow way to do this; fix it and related methods # like _alpha c = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: c.append((i, j)) return np.array(c[::-1]) def invlex_coeff(self, coeffs): invlex_coeffs = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: name = f"c{i}_{j}" coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return np.array(invlex_coeffs[::-1]) def _alpha(self): invlexdeg = self._invlex() invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1 nx = self.x_degree + 1 ny = self.y_degree + 1 alpha = np.zeros((ny * nx + 3, ny + nx)) for n in range(len(invlexdeg)): alpha[n][invlexdeg[n]] = [1, 1] alpha[-2, 0] = 1 alpha[-3, nx] = 1 return alpha def imhorner(self, x, y, coeff): _coeff = list(coeff) _coeff.extend([0, 0, 0]) alpha = self._alpha() r0 = _coeff[0] nalpha = len(alpha) karr = np.diff(alpha, axis=0) kfunc = self._fcache(x, y) x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 nterms = x_terms + y_terms for n in range(1, nterms + 1 + 3): setattr(self, "r" + str(n), 0.0) for n in range(1, nalpha): k = karr[n - 1].nonzero()[0].max() + 1 rsum = 0 for i in range(1, k + 1): rsum = rsum + getattr(self, "r" + str(i)) val = kfunc[k - 1] * (r0 + rsum) setattr(self, "r" + str(k), val) r0 = _coeff[n] for i in range(1, k): setattr(self, "r" + str(i), 0.0) result = r0 for i in range(1, nterms + 1 + 3): result = result + 
getattr(self, "r" + str(i)) return result def _generate_coeff_names(self): names = [] for j in range(self.y_degree + 1): for i in range(self.x_degree + 1): names.append(f"c{i}_{j}") return tuple(names) def _fcache(self, x, y): """ Computation and store the individual functions. To be implemented by subclasses" """ raise NotImplementedError("Subclasses should implement this") def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) return self.imhorner(x, y, invcoeff) def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") return (x, y), broadcasted_shapes class Chebyshev1D(_PolyDomainWindow1D): r""" Univariate Chebyshev series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x) where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind. For explanation of ```domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window. **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain=domain, window=window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x2 - v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): """Evaluates the polynomial using Clenshaw's algorithm.""" if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: x2 = 2 * x c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 c0 = coeffs[-i] - c1 c1 = tmp + c1 * x2 return c0 + c1 * x class Hermite1D(_PolyDomainWindow1D): r""" Univariate Hermite series. It is defined as: .. 
math:: P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x) where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind"). For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = 2 * x for i in range(2, self.degree + 1): v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): x2 = x * 2 if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): temp = c0 nd = nd - 1 c0 = coeffs[-i] - c1 * (2 * (nd - 1)) c1 = temp + c1 * x2 return c0 + c1 * x2 class Hermite2D(OrthoPolynomialBase): r""" Bivariate Hermite series. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y) where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x and/or y - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Hermite functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = 2 * x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = 2 * y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Hermite polynomials: .. math:: H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._hermderiv1d(x, self.x_degree + 1).T y_deriv = self._hermderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _hermderiv1d(self, x, deg): """ Derivative of 1D Hermite series """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x2 for i in range(2, deg + 1): d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre1D(_PolyDomainWindow1D): r""" Univariate Legendre series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_i(x)`` is the corresponding Legendre polynomial. For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i return np.rollaxis(v, 0, v.ndim) @staticmethod def clenshaw(x, coeffs): if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 nd = nd - 1 c0 = coeffs[-i] - (c1 * (nd - 1)) / nd c1 = tmp + (c1 * x * (2 * nd - 1)) / nd return c0 + c1 * x class Polynomial1D(_PolyDomainWindow1D): r""" 1D Polynomial model. It is defined as: .. math:: P = \sum_{i=0}^{i=n}C_{i} * x^{i} For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional If None, it is set to (-1, 1) window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) # Set domain separately because it's different from # the orthogonal polynomials. 
self._default_domain_window = { "domain": (-1, 1), "window": (-1, 1), } self.domain = domain or self._default_domain_window["domain"] self.window = window or self._default_domain_window["window"] def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.horner(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ v = np.empty((self.degree + 1,) + x.shape, dtype=float) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x return np.rollaxis(v, 0, v.ndim) @staticmethod def horner(x, coeffs): if len(coeffs) == 1: c0 = coeffs[-1] * np.ones_like(x, subok=False) else: c0 = coeffs[-1] for i in range(2, len(coeffs) + 1): c0 = coeffs[-i] + c0 * x return c0 @property def input_units(self): if self.degree == 0 or self.c1.unit is None: return None else: return {self.inputs[0]: self.c0.unit / self.c1.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): par = getattr(self, f"c{i}") mapping[par.name] = ( outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i ) return mapping class Polynomial2D(PolynomialModel): r""" 2D Polynomial model. Represents a general polynomial of degree n: .. math:: P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n + c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int Polynomial degree: largest sum of exponents (:math:`i + j`) of variables in each monomial term of the form :math:`x^i y^j`. The number of terms in a 2D polynomial of degree ``n`` is given by binomial coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`. 
x_domain : tuple or None, optional domain of the x independent variable If None, it is set to (-1, 1) y_domain : tuple or None, optional domain of the y independent variable If None, it is set to (-1, 1) x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the x_domain to x_window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the y_domain to y_window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 2 n_outputs = 1 _separable = False def __init__( self, degree, x_domain=None, y_domain=None, x_window=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) self._default_domain_window = { "x_domain": (-1, 1), "y_domain": (-1, 1), "x_window": (-1, 1), "y_window": (-1, 1), } self.x_domain = x_domain or self._default_domain_window["x_domain"] self.y_domain = y_domain or self._default_domain_window["y_domain"] self.x_window = x_window or self._default_domain_window["x_window"] self.y_window = y_window or self._default_domain_window["y_window"] def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs return (x, y), broadcasted_shapes def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) result = self.multivariate_horner(x, y, invcoeff) # Special case for degree==0 to ensure that the shape of the output is # still as expected by the broadcasting rules, even though the x and y # inputs are not used in the evaluation if self.degree == 0: output_shape = check_broadcast(np.shape(coeffs[0]), x.shape) if output_shape: new_result = np.empty(output_shape) new_result[:] = result result = new_result return result def __repr__(self): return self._format_repr( [self.degree], kwargs={ "x_domain": self.x_domain, "y_domain": self.y_domain, "x_window": self.x_window, "y_window": self.y_window, }, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [ ("Degree", self.degree), ("X_Domain", self.x_domain), ("Y_Domain", self.y_domain), ("X_Window", self.x_window), ("Y_Window", self.y_window), ], self._default_domain_window, ) def fit_deriv(self, x, y, *params): """ Computes the Vandermonde matrix. 
Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.ndim == 2: x = x.flatten() if y.ndim == 2: y = y.flatten() if x.size != y.size: raise ValueError("Expected x and y to be of equal size") designx = x[:, None] ** np.arange(self.degree + 1) designy = y[:, None] ** np.arange(1, self.degree + 1) designmixed = [] for i in range(1, self.degree): for j in range(1, self.degree): if i + j <= self.degree: designmixed.append((x**i) * (y**j)) designmixed = np.array(designmixed).T if designmixed.any(): v = np.hstack([designx, designy, designmixed]) else: v = np.hstack([designx, designy]) return v def invlex_coeff(self, coeffs): invlex_coeffs = [] lencoeff = range(self.degree + 1) for i in lencoeff: for j in lencoeff: if i + j <= self.degree: name = f"c{j}_{i}" coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return invlex_coeffs[::-1] def multivariate_horner(self, x, y, coeffs): """ Multivariate Horner's scheme Parameters ---------- x, y : array coeffs : array Coefficients in inverse lexical order. """ alpha = self._invlex() r0 = coeffs[0] r1 = r0 * 0.0 r2 = r0 * 0.0 karr = np.diff(alpha, axis=0) for n in range(len(karr)): if karr[n, 1] != 0: r2 = y * (r0 + r1 + r2) r1 = np.zeros_like(coeffs[0], subok=False) else: r1 = x * (r0 + r1) r0 = coeffs[n + 1] return r0 + r1 + r2 @property def input_units(self): if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None): return None return { self.inputs[0]: self.c0_0.unit / self.c1_0.unit, self.inputs[1]: self.c0_0.unit / self.c0_1.unit, } def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): for j in range(self.degree + 1): if i + j > 2: continue par = getattr(self, f"c{i}_{j}") mapping[par.name] = ( outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i / inputs_unit[self.inputs[1]] ** j ) return mapping @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) class Chebyshev2D(OrthoPolynomialBase): r""" Bivariate Chebyshev series.. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x ) T_m(y) where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x and/or y - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Chebyshev functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Chebyshev polynomials: .. math:: T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._chebderiv1d(x, self.x_degree + 1).T y_deriv = self._chebderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _chebderiv1d(self, x, deg): """ Derivative of 1D Chebyshev series """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x for i in range(2, deg + 1): d[i] = d[i - 1] * x2 - d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre2D(OrthoPolynomialBase): r""" Bivariate Legendre series. Defined as: .. math:: P_{n_m}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x ) L_m(y) where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- Model formula: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_{i}`` is the corresponding Legendre polynomial. This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Legendre functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = ( (2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2] ) / n for n in range(2, y_terms): kfunc[n + x_terms] = ( (2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] - (n - 1) * kfunc[n + x_terms - 2] ) / (n) return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Legendre polynomials: Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._legendderiv1d(x, self.x_degree + 1).T y_deriv = self._legendderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _legendderiv1d(self, x, deg): """Derivative of 1D Legendre polynomial""" x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1,) + x.shape, dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: d[1] = x for i in range(2, deg + 1): d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i return np.rollaxis(d, 0, d.ndim) class _SIP1D(PolynomialBase): """ This implements the Simple Imaging Polynomial Model (SIP) in 1D. It's unlikely it will be used in 1D so this class is private and SIP should be used instead. 
""" n_inputs = 2 n_outputs = 1 _separable = False def __init__( self, order, coeff_prefix, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): self.order = order self.coeff_prefix = coeff_prefix self._param_names = self._generate_coeff_names(coeff_prefix) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def __repr__(self): return self._format_repr(args=[self.order, self.coeff_prefix]) def __str__(self): return self._format_str( [("Order", self.order), ("Coeff. Prefix", self.coeff_prefix)] ) def evaluate(self, x, y, *coeffs): # TODO: Rewrite this so that it uses a simpler method of determining # the matrix based on the number of given coefficients. mcoef = self._coeff_matrix(self.coeff_prefix, coeffs) return self._eval_sip(x, y, mcoef) def get_num_coeff(self, ndim): """ Return the number of coefficients in one param set """ if self.order < 2 or self.order > 9: raise ValueError("Degree of polynomial must be 2< deg < 9") nmixed = comb(self.order, ndim) # remove 3 terms because SIP deg >= 2 numc = self.order * ndim + nmixed - 2 return numc def _generate_coeff_names(self, coeff_prefix): names = [] for i in range(2, self.order + 1): names.append(f"{coeff_prefix}_{i}_{0}") for i in range(2, self.order + 1): names.append(f"{coeff_prefix}_{0}_{i}") for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: names.append(f"{coeff_prefix}_{i}_{j}") return tuple(names) def _coeff_matrix(self, coeff_prefix, coeffs): mat = np.zeros((self.order + 1, self.order + 1)) for i in range(2, self.order + 1): attr = f"{coeff_prefix}_{i}_{0}" mat[i, 0] = coeffs[self.param_names.index(attr)] for i in range(2, self.order + 1): attr = f"{coeff_prefix}_{0}_{i}" mat[0, i] = coeffs[self.param_names.index(attr)] for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: attr = f"{coeff_prefix}_{i}_{j}" mat[i, j] = coeffs[self.param_names.index(attr)] return mat def _eval_sip(self, x, y, coef): x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) if self.coeff_prefix == "A": result = np.zeros(x.shape) else: result = np.zeros(y.shape) for i in range(coef.shape[0]): for j in range(coef.shape[1]): if 1 < i + j < self.order + 1: result = result + coef[i, j] * x**i * y**j return result class SIP(Model): """ Simple Imaging Polynomial (SIP) model. The SIP convention is used to represent distortions in FITS image headers. See [1]_ for a description of the SIP convention. Parameters ---------- crpix : list or (2,) ndarray CRPIX values a_order : int SIP polynomial order for first axis b_order : int SIP order for second axis a_coeff : dict SIP coefficients for first axis b_coeff : dict SIP coefficients for the second axis ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform References ---------- .. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 
347, 2005 <https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_ """ n_inputs = 2 n_outputs = 2 _separable = False def __init__( self, crpix, a_order, b_order, a_coeff={}, b_coeff={}, ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None, ): self._crpix = crpix self._a_order = a_order self._b_order = b_order self._a_coeff = a_coeff self._b_coeff = b_coeff self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff self.shift_a = Shift(-crpix[0]) self.shift_b = Shift(-crpix[1]) self.sip1d_a = _SIP1D( a_order, coeff_prefix="A", n_models=n_models, model_set_axis=model_set_axis, **a_coeff, ) self.sip1d_b = _SIP1D( b_order, coeff_prefix="B", n_models=n_models, model_set_axis=model_set_axis, **b_coeff, ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) self._inputs = ("u", "v") self._outputs = ("x", "y") def __repr__(self): return ( f"<{self.__class__.__name__}" f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>" ) def __str__(self): parts = [f"Model: {self.__class__.__name__}"] for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]: parts.append(indent(str(model), width=4)) parts.append("") return "\n".join(parts) @property def inverse(self): if self._ap_order is not None and self._bp_order is not None: return InverseSIP( self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff ) else: raise NotImplementedError("SIP inverse coefficients are not available.") def evaluate(self, x, y): u = self.shift_a.evaluate(x, *self.shift_a.param_sets) v = self.shift_b.evaluate(y, *self.shift_b.param_sets) f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets) g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets) return f, g class InverseSIP(Model): """ Inverse Simple Imaging Polynomial Parameters ---------- ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform """ n_inputs = 2 n_outputs = 2 _separable = False def __init__( self, ap_order, bp_order, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None, ): self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff # define the 0th term in order to use Polynomial2D ap_coeff.setdefault("AP_0_0", 0) bp_coeff.setdefault("BP_0_0", 0) ap_coeff_params = {k.replace("AP_", "c"): v for k, v in ap_coeff.items()} bp_coeff_params = {k.replace("BP_", "c"): v for k, v in bp_coeff.items()} self.sip1d_ap = Polynomial2D( degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params ) self.sip1d_bp = Polynomial2D( degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) def __repr__(self): return f"<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>" def __str__(self): parts = [f"Model: {self.__class__.__name__}"] for model in [self.sip1d_ap, self.sip1d_bp]: parts.append(indent(str(model), width=4)) parts.append("") return "\n".join(parts) def evaluate(self, x, y): x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets) y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets) return x1, y1
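

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library code above): evaluating
# one of the 1D polynomial models defined in this module and recovering
# coefficients with a linear least-squares fit.  The coefficient values and
# sample data below are assumptions made purely for demonstration.
if __name__ == "__main__":
    import numpy as np

    from astropy.modeling import fitting
    from astropy.modeling.models import Chebyshev1D, Polynomial1D

    # Direct evaluation of P(x) = 1 + 2 x + 3 x**2.
    poly = Polynomial1D(degree=2, c0=1.0, c1=2.0, c2=3.0)
    x = np.linspace(-1.0, 1.0, 21)
    print(poly(x))

    # These models are linear in their coefficients, so LinearLSQFitter can
    # recover them from noiseless samples of a target function.
    y = 0.5 - 1.5 * x + 0.25 * x**3
    fitted = fitting.LinearLSQFitter()(Chebyshev1D(degree=3), x, y)
    print(fitted.parameters)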
e3c058627a089df2d1ec0f4222ef30ec4e9d47d2b2f358032c17cc056b721a37
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tabular models. Tabular models of any dimension can be created using `tabular_model`. For convenience `Tabular1D` and `Tabular2D` are provided. Examples -------- >>> table = np.array([[ 3., 0., 0.], ... [ 0., 2., 0.], ... [ 0., 0., 0.]]) >>> points = ([1, 2, 3], [1, 2, 3]) >>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False, ... fill_value=None, method='nearest') """ # pylint: disable=invalid-name import numpy as np from astropy import units as u from .core import Model try: from scipy.interpolate import interpn has_scipy = True except ImportError: has_scipy = False __all__ = ["tabular_model", "Tabular1D", "Tabular2D"] __doctest_requires__ = {"tabular_model": ["scipy"]} class _Tabular(Model): """ Returns an interpolated lookup table value. Parameters ---------- points : tuple of ndarray of float, optional The points defining the regular grid in n dimensions. ndarray must have shapes (m1, ), ..., (mn, ), lookup_table : array-like The data on a regular grid in n dimensions. Must have shapes (m1, ..., mn, ...) method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float or `~astropy.units.Quantity`, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". If Quantity is given, it will be converted to the unit of ``lookup_table``, if applicable. Returns ------- value : ndarray Interpolated values at input coordinates. Raises ------ ImportError Scipy is not installed. Notes ----- Uses `scipy.interpolate.interpn`. """ linear = False fittable = False standard_broadcasting = False _is_dynamic = True _id = 0 def __init__( self, points=None, lookup_table=None, method="linear", bounds_error=True, fill_value=np.nan, **kwargs, ): n_models = kwargs.get("n_models", 1) if n_models > 1: raise NotImplementedError("Only n_models=1 is supported.") super().__init__(**kwargs) self.outputs = ("y",) if lookup_table is None: raise ValueError("Must provide a lookup table.") if not isinstance(lookup_table, u.Quantity): lookup_table = np.asarray(lookup_table) if self.lookup_table.ndim != lookup_table.ndim: raise ValueError( "lookup_table should be an array with " f"{self.lookup_table.ndim} dimensions." ) if points is None: points = tuple(np.arange(x, dtype=float) for x in lookup_table.shape) else: if lookup_table.ndim == 1 and not isinstance(points, tuple): points = (points,) npts = len(points) if npts != lookup_table.ndim: raise ValueError( "Expected grid points in " f"{lookup_table.ndim} directions, got {npts}." ) if ( npts > 1 and isinstance(points[0], u.Quantity) and len({getattr(p, "unit", None) for p in points}) > 1 ): raise ValueError("points must all have the same unit.") if isinstance(fill_value, u.Quantity): if not isinstance(lookup_table, u.Quantity): raise ValueError( f"fill value is in {fill_value.unit} but expected to be unitless." 
) fill_value = fill_value.to(lookup_table.unit).value self.points = points self.lookup_table = lookup_table self.bounds_error = bounds_error self.method = method self.fill_value = fill_value def __repr__(self): return ( f"<{self.__class__.__name__}(points={self.points}, " f"lookup_table={self.lookup_table})>" ) def __str__(self): default_keywords = [ ("Model", self.__class__.__name__), ("Name", self.name), ("N_inputs", self.n_inputs), ("N_outputs", self.n_outputs), ("Parameters", ""), (" points", self.points), (" lookup_table", self.lookup_table), (" method", self.method), (" fill_value", self.fill_value), (" bounds_error", self.bounds_error), ] parts = [ f"{keyword}: {value}" for keyword, value in default_keywords if value is not None ] return "\n".join(parts) @property def input_units(self): pts = self.points[0] if not isinstance(pts, u.Quantity): return None return {x: pts.unit for x in self.inputs} @property def return_units(self): if not isinstance(self.lookup_table, u.Quantity): return None return {self.outputs[0]: self.lookup_table.unit} @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(points_low, points_high)``. Examples -------- >>> from astropy.modeling.models import Tabular1D, Tabular2D >>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30]) >>> t1.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) } model=Tabular1D(inputs=('x',)) order='C' ) >>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]], ... lookup_table=[[10, 20, 30], [20, 30, 40]]) >>> t2.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=1, upper=3) y: Interval(lower=2, upper=4) } model=Tabular2D(inputs=('x', 'y')) order='C' ) """ bbox = [(min(p), max(p)) for p in self.points][::-1] if len(bbox) == 1: bbox = bbox[0] return bbox def evaluate(self, *inputs): """ Return the interpolated values at the input coordinates. Parameters ---------- inputs : list of scalar or list of ndarray Input coordinates. The number of inputs must be equal to the dimensions of the lookup table. 
""" inputs = np.broadcast_arrays(*inputs) shape = inputs[0].shape inputs = [inp.flatten() for inp in inputs[: self.n_inputs]] inputs = np.array(inputs).T if not has_scipy: # pragma: no cover raise ImportError("Tabular model requires scipy.") result = interpn( self.points, self.lookup_table, inputs, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value, ) # return_units not respected when points has no units if isinstance(self.lookup_table, u.Quantity) and not isinstance( self.points[0], u.Quantity ): result = result * self.lookup_table.unit if self.n_outputs == 1: result = result.reshape(shape) else: result = [r.reshape(shape) for r in result] return result @property def inverse(self): if self.n_inputs == 1: # If the wavelength array is descending instead of ascending, both # points and lookup_table need to be reversed in the inverse transform # for scipy.interpolate to work properly if np.all(np.diff(self.lookup_table) > 0): # ascending case points = self.lookup_table lookup_table = self.points[0] elif np.all(np.diff(self.lookup_table) < 0): # descending case, reverse order points = self.lookup_table[::-1] lookup_table = self.points[0][::-1] else: # equal-valued or double-valued lookup_table raise NotImplementedError return Tabular1D( points=points, lookup_table=lookup_table, method=self.method, bounds_error=self.bounds_error, fill_value=self.fill_value, ) raise NotImplementedError( "An analytical inverse transform has not been implemented for this model." ) def tabular_model(dim, name=None): """ Make a ``Tabular`` model where ``n_inputs`` is based on the dimension of the lookup_table. This model has to be further initialized and when evaluated returns the interpolated values. Parameters ---------- dim : int Dimensions of the lookup table. name : str Name for the class. Examples -------- >>> table = np.array([[3., 0., 0.], ... [0., 2., 0.], ... [0., 0., 0.]]) >>> tab = tabular_model(2, name='Tabular2D') >>> print(tab) <class 'astropy.modeling.tabular.Tabular2D'> Name: Tabular2D N_inputs: 2 N_outputs: 1 >>> points = ([1, 2, 3], [1, 2, 3]) Setting fill_value to None, allows extrapolation. >>> m = tab(points, lookup_table=table, name='my_table', ... bounds_error=False, fill_value=None, method='nearest') >>> xinterp = [0, 1, 1.5, 2.72, 3.14] >>> m(xinterp, xinterp) # doctest: +FLOAT_CMP array([3., 3., 3., 0., 0.]) """ if dim < 1: raise ValueError("Lookup table must have at least one dimension.") table = np.zeros([2] * dim) members = {"lookup_table": table, "n_inputs": dim, "n_outputs": 1} if dim == 1: members["_separable"] = True else: members["_separable"] = False if name is None: model_id = _Tabular._id _Tabular._id += 1 name = f"Tabular{model_id}" model_class = type(str(name), (_Tabular,), members) model_class.__module__ = "astropy.modeling.tabular" return model_class Tabular1D = tabular_model(1, name="Tabular1D") Tabular2D = tabular_model(2, name="Tabular2D") _tab_docs = """ method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then ``fill_value`` is used. fill_value : float, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". 
    Returns
    -------
    value : ndarray
        Interpolated values at input coordinates.

    Raises
    ------
    ImportError
        Scipy is not installed.

    Notes
    -----
    Uses `scipy.interpolate.interpn`.
"""

Tabular1D.__doc__ = (
    """
    Tabular model in 1D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : array-like of float of ndim=1
        The points defining the regular grid in one dimension.
    lookup_table : array-like of ndim=1
        The data in one dimension.
"""
    + _tab_docs
)

Tabular2D.__doc__ = (
    """
    Tabular model in 2D.

    Returns an interpolated lookup table value.

    Parameters
    ----------
    points : tuple of ndarray of float, optional
        The points defining the regular grid in n dimensions.
        ndarray with shapes (m1, m2).
    lookup_table : array-like
        The data on a regular grid in 2 dimensions. Shape (m1, m2).
"""
    + _tab_docs
)
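

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library code above): a 1D lookup
# table model and its analytic inverse, which is available when the
# lookup_table is strictly monotonic.  The table values are assumptions
# chosen only for demonstration; scipy must be installed for evaluation.
if __name__ == "__main__":
    import numpy as np

    points = np.array([1.0, 2.0, 3.0, 4.0])
    lookup = np.array([10.0, 20.0, 30.0, 40.0])

    # Tabular1D is defined above in this module.
    tab = Tabular1D(
        points=points,
        lookup_table=lookup,
        bounds_error=False,
        fill_value=None,
        method="linear",
    )
    print(tab([1.5, 2.5]))            # linear interpolation -> [15., 25.]
    print(tab.inverse([15.0, 25.0]))  # inverse lookup       -> [1.5, 2.5]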
65760a2a32c81fa47160d762f87ece40048ef3df3a48e4d359d5ea4e92bc171f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Models that have physical origins. """ # pylint: disable=invalid-name, no-member import warnings import numpy as np from astropy import constants as const from astropy import units as u from astropy.utils.exceptions import AstropyUserWarning from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"] class BlackBody(Fittable1DModel): """ Blackbody model using the Planck function. Parameters ---------- temperature : `~astropy.units.Quantity` ['temperature'] Blackbody temperature. scale : float or `~astropy.units.Quantity` ['dimensionless'] Scale factor. If dimensionless, input units will assumed to be in Hz and output units in (erg / (cm ** 2 * s * Hz * sr). If not dimensionless, must be equivalent to either (erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr), in which case the result will be returned in the requested units and the scale will be stripped of units (with the float value applied). Notes ----- Model formula: .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1} Examples -------- >>> from astropy.modeling import models >>> from astropy import units as u >>> bb = models.BlackBody(temperature=5000*u.K) >>> bb(6000 * u.AA) # doctest: +FLOAT_CMP <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)> .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import BlackBody from astropy import units as u from astropy.visualization import quantity_support bb = BlackBody(temperature=5778*u.K) wav = np.arange(1000, 110000) * u.AA flux = bb(wav) with quantity_support(): plt.figure() plt.semilogx(wav, flux) plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--') plt.show() """ # We parametrize this model with a temperature and a scale. temperature = Parameter( default=5000.0, min=0, unit=u.K, description="Blackbody temperature" ) scale = Parameter(default=1.0, min=0, description="Scale factor") # We allow values without units to be passed when evaluating the model, and # in this case the input x values are assumed to be frequencies in Hz or wavelengths # in AA (depending on the choice of output units controlled by units on scale # and stored in self._output_units during init). _input_units_allow_dimensionless = True # We enable the spectral equivalency by default for the spectral axis input_units_equivalencies = {"x": u.spectral()} # Store the native units returned by B_nu equation _native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr) # Store the base native output units. If scale is not dimensionless, it # must be equivalent to one of these. If equivalent to SLAM, then # input_units will expect AA for 'x', otherwise Hz. _native_output_units = { "SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr), "SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr), } def __init__(self, *args, **kwargs): scale = kwargs.get("scale", None) # Support scale with non-dimensionless unit by stripping the unit and # storing as self._output_units. 
if hasattr(scale, "unit") and not scale.unit.is_equivalent( u.dimensionless_unscaled ): output_units = scale.unit if not output_units.is_equivalent( self._native_units, u.spectral_density(1 * u.AA) ): raise ValueError( "scale units not dimensionless or in " f"surface brightness: {output_units}" ) kwargs["scale"] = scale.value self._output_units = output_units else: self._output_units = self._native_units return super().__init__(*args, **kwargs) def evaluate(self, x, temperature, scale): """Evaluate the model. Parameters ---------- x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency'] Frequency at which to compute the blackbody. If no units are given, this defaults to Hz (or AA if `scale` was initialized with units equivalent to erg / (cm ** 2 * s * AA * sr)). temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity` Temperature of the blackbody. If no units are given, this defaults to Kelvin. scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless'] Desired scale for the blackbody. Returns ------- y : number or ndarray Blackbody spectrum. The units are determined from the units of ``scale``. .. note:: Use `numpy.errstate` to suppress Numpy warnings, if desired. .. warning:: Output values might contain ``nan`` and ``inf``. Raises ------ ValueError Invalid temperature. ZeroDivisionError Wavelength is zero (when converting to frequency). """ if not isinstance(temperature, u.Quantity): in_temp = u.Quantity(temperature, u.K) else: in_temp = temperature if not isinstance(x, u.Quantity): # then we assume it has input_units which depends on the # requested output units (either Hz or AA) in_x = u.Quantity(x, self.input_units["x"]) else: in_x = x # Convert to units for calculations, also force double precision with u.add_enabled_equivalencies(u.spectral() + u.temperature()): freq = u.Quantity(in_x, u.Hz, dtype=np.float64) temp = u.Quantity(in_temp, u.K) # Check if input values are physically possible if np.any(temp < 0): raise ValueError(f"Temperature should be positive: {temp}") if not np.all(np.isfinite(freq)) or np.any(freq <= 0): warnings.warn( "Input contains invalid wavelength/frequency value(s)", AstropyUserWarning, ) log_boltz = const.h * freq / (const.k_B * temp) boltzm1 = np.expm1(log_boltz) # Calculate blackbody flux bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled if not hasattr(scale, "unit"): # during fitting, scale will be passed without units # but we still need to convert from the input dimensionless # to dimensionless unscaled scale = scale * self.scale.unit scale = scale.to(u.dimensionless_unscaled).value # NOTE: scale is already stripped of any input units y = scale * bb_nu.to(self._output_units, u.spectral_density(freq)) # If the temperature parameter has no unit, we should return a unitless # value. This occurs for instance during fitting, since we drop the # units temporarily. if hasattr(temperature, "unit"): return y return y.value @property def input_units(self): # The input units are those of the 'x' value, which will depend on the # units compatible with the expected output units. 
if self._output_units.is_equivalent(self._native_output_units["SNU"]): return {self.inputs[0]: u.Hz} else: # only other option is equivalent with SLAM return {self.inputs[0]: u.AA} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"temperature": u.K} @property def bolometric_flux(self): """Bolometric flux.""" if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled scale = self.scale.quantity.to(u.dimensionless_unscaled) else: scale = self.scale.value # bolometric flux in the native units of the planck function native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi # return in more "astro" units return native_bolflux.to(u.erg / (u.cm**2 * u.s)) @property def lambda_max(self): """Peak wavelength when the curve is expressed as power density.""" return const.b_wien / self.temperature @property def nu_max(self): """Peak frequency when the curve is expressed as power density.""" return 2.8214391 * const.k_B * self.temperature / const.h class Drude1D(Fittable1DModel): """ Drude model based one the behavior of electons in materials (esp. metals). Parameters ---------- amplitude : float Peak value x_0 : float Position of the peak fwhm : float Full width at half maximum Model formula: .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{((x/x_0 - x_0/x)^2 + (fwhm/x_0)^2} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Drude1D fig, ax = plt.subplots() # generate the curves and plot them x = np.arange(7.5 , 12.5 , 0.1) dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0) ax.plot(x, dmodel(x)) ax.set_xlabel('x') ax.set_ylabel('F(x)') plt.show() """ amplitude = Parameter(default=1.0, description="Peak Value") x_0 = Parameter(default=1.0, description="Position of the peak") fwhm = Parameter(default=1.0, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """ One dimensional Drude model function """ return ( amplitude * ((fwhm / x_0) ** 2) / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) ) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """ Drude1D model function derivatives. """ d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) d_x_0 = ( -2 * amplitude * d_amplitude * ( (1 / x_0) + d_amplitude * (x_0**2 / fwhm**2) * ( (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x) - (2 * fwhm**2 / x_0**3) ) ) ) d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} @x_0.validator def x_0(self, val): """Ensure `x_0` is not 0.""" if np.any(val == 0): raise InputParameterError("0 is not an allowed value for x_0") def bounding_box(self, factor=50): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) class Plummer1D(Fittable1DModel): r"""One dimensional Plummer density profile model. Parameters ---------- mass : float Total mass of cluster. 
r_plum : float Scale parameter which sets the size of the cluster core. Notes ----- Model formula: .. math:: \rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2} References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P """ mass = Parameter(default=1.0, description="Total mass of cluster") r_plum = Parameter( default=1.0, description="Scale parameter which sets the size of the cluster core", ) @staticmethod def evaluate(x, mass, r_plum): """ Evaluate plummer density profile model. """ return ( (3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2) ) @staticmethod def fit_deriv(x, mass, r_plum): """ Plummer1D model derivatives. """ d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2))) d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / ( (4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2) ) return [d_mass, d_r_plum] @property def input_units(self): if self.mass.unit is None and self.r_plum.unit is None: return None else: return {self.inputs[0]: self.r_plum.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3, "r_plum": inputs_unit[self.inputs[0]], } class NFW(Fittable1DModel): r""" Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter. Parameters ---------- mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. massfactor : tuple or str Mass overdensity factor and type for provided profiles: Tuple version: ("virial",) : virial radius ("critical", N) : radius where density is N times that of the critical density ("mean", N) : radius where density is N times that of the mean density String version: "virial" : virial radius "Nc" : radius where density is N times that of the critical density (e.g. "200c") "Nm" : radius where density is N times that of the mean density (e.g. "500m") cosmo : :class:`~astropy.cosmology.Cosmology` Background cosmology for density calculation. If None, the default cosmology will be used. Notes ----- Model formula: .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2} References ---------- .. [1] https://arxiv.org/pdf/astro-ph/9508025 .. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile .. [3] https://en.wikipedia.org/wiki/Virial_mass """ # Model Parameters # NFW Profile mass mass = Parameter( default=1.0, min=1.0, unit=u.M_sun, description="Peak mass within specified overdensity radius", ) # NFW profile concentration concentration = Parameter(default=1.0, min=1.0, description="Concentration") # NFW Profile redshift redshift = Parameter(default=0.0, min=0.0, description="Redshift") # We allow values without units to be passed when evaluating the model, and # in this case the input r values are assumed to be lengths / positions in kpc. 
_input_units_allow_dimensionless = True def __init__( self, mass=u.Quantity(mass.default, mass.unit), concentration=concentration.default, redshift=redshift.default, massfactor=("critical", 200), cosmo=None, **kwargs, ): # Set default cosmology if cosmo is None: # LOCAL from astropy.cosmology import default_cosmology cosmo = default_cosmology.get() # Set mass overdensity type and factor self._density_delta(massfactor, cosmo, redshift) # Establish mass units for density calculation (default solar masses) if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Obtain scale radius self._radius_s(mass, concentration) # Obtain scale density self._density_s(mass, concentration) super().__init__( mass=in_mass, concentration=concentration, redshift=redshift, **kwargs ) def evaluate(self, r, mass, concentration, redshift): """ One dimensional NFW profile function Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of density to be calculated for the NFW profile. mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. Returns ------- density : float or `~astropy.units.Quantity` ['density'] NFW profile mass density at location ``r``. The density units are: [``mass`` / ``r`` ^3] Notes ----- .. warning:: Output values might contain ``nan`` and ``inf``. """ # Create radial version of input with dimension if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Define reduced radius (r / r_{\\rm s}) # also update scale radius radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit) # Density distribution # \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2} # also update scale density density = self._density_s(mass, concentration) / ( radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2 ) if hasattr(mass, "unit"): return density else: return density.value def _density_delta(self, massfactor, cosmo, redshift): """ Calculate density delta. 
""" # Set mass overdensity type and factor if isinstance(massfactor, tuple): # Tuple options # ("virial") : virial radius # ("critical", N) : radius where density is N that of the critical density # ("mean", N) : radius where density is N that of the mean density if massfactor[0].lower() == "virial": # Virial Mass delta = None masstype = massfactor[0].lower() elif massfactor[0].lower() == "critical": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = "c" elif massfactor[0].lower() == "mean": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = "m" else: raise ValueError( f"Massfactor '{massfactor[0]}' not one of 'critical', " "'mean', or 'virial'" ) else: try: # String options # virial : virial radius # Nc : radius where density is N that of the critical density # Nm : radius where density is N that of the mean density if massfactor.lower() == "virial": # Virial Mass delta = None masstype = massfactor.lower() elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m": # Critical or Mean Overdensity Mass delta = float(massfactor[0:-1]) masstype = massfactor[-1].lower() else: raise ValueError( f"Massfactor {massfactor} string not of the form " "'#m', '#c', or 'virial'" ) except (AttributeError, TypeError): raise TypeError(f"Massfactor {massfactor} not a tuple or string") # Set density from masstype specification if masstype == "virial": Om_c = cosmo.Om(redshift) - 1.0 d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2 self.density_delta = d_c * cosmo.critical_density(redshift) elif masstype == "c": self.density_delta = delta * cosmo.critical_density(redshift) elif masstype == "m": self.density_delta = ( delta * cosmo.critical_density(redshift) * cosmo.Om(redshift) ) return self.density_delta @staticmethod def A_NFW(y): r""" Dimensionless volume integral of the NFW profile, used as an intermediate step in some calculations for this model. Notes ----- Model formula: .. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}] """ return np.log(1.0 + y) - (y / (1.0 + y)) def _density_s(self, mass, concentration): """ Calculate scale density of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Calculate scale density # M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right]. self.density_s = in_mass / ( 4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 * self.A_NFW(concentration) ) return self.density_s @property def rho_scale(self): r""" Scale density of the NFW profile. Often written in the literature as :math:`\rho_s` """ return self.density_s def _radius_s(self, mass, concentration): """ Calculate scale radius of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Delta Mass is related to delta radius by # M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c} # And delta radius is related to the NFW scale radius by # c = R / r_{\\rm s} self.radius_s = ( ((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0) ) / concentration # Set radial units to kiloparsec by default (unit will be rescaled by units of radius # in evaluate) return self.radius_s.to(u.kpc) @property def r_s(self): """ Scale radius of the NFW profile. """ return self.radius_s @property def r_virial(self): """ Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.). 
""" return self.r_s * self.concentration @property def r_max(self): """ Radius of maximum circular velocity. """ return self.r_s * 2.16258 @property def v_max(self): """ Maximum circular velocity. """ return self.circular_velocity(self.r_max) def circular_velocity(self, r): r""" Circular velocities of the NFW profile. Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of velocity to be calculated for the NFW profile. Returns ------- velocity : float or `~astropy.units.Quantity` ['speed'] NFW profile circular velocity at location ``r``. The velocity units are: [km / s] Notes ----- Model formula: .. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} .. math:: x = r/r_s .. warning:: Output values might contain ``nan`` and ``inf``. """ # Enforce default units (if parameters are without units) if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Mass factor defined velocity (i.e. V200c for M200c, Rvir for Mvir) v_profile = np.sqrt( self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) / self.r_virial ) # Define reduced radius (r / r_{\\rm s}) reduced_radius = in_r / self.r_virial.to(in_r.unit) # Circular velocity given by: # v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} # where x=r/r_{200} velocity = np.sqrt( (v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) / (reduced_radius * self.A_NFW(self.concentration)) ) return velocity.to(u.km / u.s) @property def input_units(self): # The units for the 'r' variable should be a length (default kpc) return {self.inputs[0]: u.kpc} @property def return_units(self): # The units for the 'density' variable should be a matter density (default M_sun / kpc^3) if self.mass.unit is None: return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3} else: return { self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3 } def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"mass": u.M_sun, "concentration": None, "redshift": None}
92d8418b94b677869043c17fa045191f4c7cbc5e99392a9f20aa384e7428170e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Statistic functions used in `~astropy.modeling.fitting`. """ # pylint: disable=invalid-name import numpy as np __all__ = ["leastsquare", "leastsquare_1d", "leastsquare_2d", "leastsquare_3d"] def leastsquare(measured_vals, updated_model, weights, *x): """Least square statistic, with optional weights, in N-dimensions. Parameters ---------- measured_vals : ndarray or sequence Measured data values. Will be cast to array whose shape must match the array-cast of the evaluated model. updated_model : :class:`~astropy.modeling.Model` instance Model with parameters set by the current iteration of the optimizer. when evaluated on "x", must return array of shape "measured_vals" weights : ndarray or None Array of weights to apply to each residual. *x : ndarray Independent variables on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare_1d` :func:`~astropy.modeling.statistic.leastsquare_2d` :func:`~astropy.modeling.statistic.leastsquare_3d` Notes ----- Models in :mod:`~astropy.modeling` have broadcasting rules that try to match inputs with outputs with Model shapes. Numpy arrays have flexible broadcasting rules, so mismatched shapes can often be made compatible. To ensure data matches the model we must perform shape comparison and leverage the Numpy arithmetic functions. This can obfuscate arithmetic computation overrides, like with Quantities. Implement a custom statistic for more direct control. """ model_vals = updated_model(*x) if np.shape(model_vals) != np.shape(measured_vals): raise ValueError( f"Shape mismatch between model ({np.shape(model_vals)}) " f"and measured ({np.shape(measured_vals)})" ) if weights is None: weights = 1.0 return np.sum(np.square(weights * np.subtract(model_vals, measured_vals))) # ------------------------------------------------------------------- def leastsquare_1d(measured_vals, updated_model, weights, x): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 1D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2) def leastsquare_2d(measured_vals, updated_model, weights, x, y): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 2D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. y : ndarray Independent variable "y" on which to evaluate the model. Returns ------- res : float The sum of least squares. 
See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x, y) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2) def leastsquare_3d(measured_vals, updated_model, weights, x, y, z): """ Least square statistic with optional weights. Safer than the general :func:`~astropy.modeling.statistic.leastsquare` for 3D models by avoiding numpy methods that support broadcasting. Parameters ---------- measured_vals : ndarray Measured data values. updated_model : `~astropy.modeling.Model` Model with parameters set by the current iteration of the optimizer. weights : ndarray or None Array of weights to apply to each residual. x : ndarray Independent variable "x" on which to evaluate the model. y : ndarray Independent variable "y" on which to evaluate the model. z : ndarray Independent variable "z" on which to evaluate the model. Returns ------- res : float The sum of least squares. See Also -------- :func:`~astropy.modeling.statistic.leastsquare` """ model_vals = updated_model(x, y, z) if weights is None: return np.sum((model_vals - measured_vals) ** 2) return np.sum((weights * (model_vals - measured_vals)) ** 2)
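

# --- Illustrative usage sketch (not part of the module above) ---
# A minimal, hedged example of the least-square statistics defined in this
# file, evaluated against a simple 1D model. The data, weights, and model
# parameters below are arbitrary demo values.
if __name__ == "__main__":  # pragma: no cover
    from astropy.modeling.models import Linear1D

    x = np.linspace(0.0, 10.0, 11)
    y = 2.0 * x + 1.0
    y[5] += 0.5              # perturb one point so the residual is non-zero
    weights = np.ones_like(y)

    model = Linear1D(slope=2.0, intercept=1.0)
    print(leastsquare(y, model, weights, x))     # general N-dimensional form
    print(leastsquare_1d(y, model, weights, x))  # 1D-specific form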
f8f6105373ff9a36ed4891f139c6c767e37eb52316beeb4e35880d85eea85f3e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Power law model variants """ # pylint: disable=invalid-name import numpy as np from astropy.units import Magnitude, Quantity, UnitsError, dimensionless_unscaled, mag from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = [ "PowerLaw1D", "BrokenPowerLaw1D", "SmoothlyBrokenPowerLaw1D", "ExponentialCutoffPowerLaw1D", "LogParabola1D", "Schechter1D", ] class PowerLaw1D(Fittable1DModel): """ One dimensional power law model. Parameters ---------- amplitude : float Model amplitude at the reference point x_0 : float Reference point alpha : float Power law index See Also -------- BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. math:: f(x) = A (x / x_0) ^ {-\\alpha} """ amplitude = Parameter(default=1, description="Peak value at the reference point") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") @staticmethod def evaluate(x, amplitude, x_0, alpha): """One dimensional power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, alpha): """One dimensional power law derivative with respect to parameters""" xx = x / x_0 d_amplitude = xx ** (-alpha) d_x_0 = amplitude * alpha * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) return [d_amplitude, d_x_0, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class BrokenPowerLaw1D(Fittable1DModel): """ One dimensional power law model with a break. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for x < x_break. alpha_2 : float Power law index for x > x_break. See Also -------- PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1` for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``): .. math:: f(x) = \\left \\{ \\begin{array}{ll} A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\ A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\ \\end{array} \\right. 
""" amplitude = Parameter(default=1, description="Peak value at break point") x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=1, description="Power law index before break point") alpha_2 = Parameter(default=1, description="Power law index after break point") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law model function""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break return amplitude * xx ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2): """One dimensional broken power law derivative with respect to parameters""" alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break d_amplitude = xx ** (-alpha) d_x_break = amplitude * alpha * d_amplitude / x_break d_alpha = -amplitude * d_amplitude * np.log(xx) d_alpha_1 = np.where(x < x_break, d_alpha, 0) d_alpha_2 = np.where(x >= x_break, d_alpha, 0) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_break": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class SmoothlyBrokenPowerLaw1D(Fittable1DModel): """One dimensional smoothly broken power law model. Parameters ---------- amplitude : float Model amplitude at the break point. x_break : float Break point. alpha_1 : float Power law index for ``x << x_break``. alpha_2 : float Power law index for ``x >> x_break``. delta : float Smoothness parameter. See Also -------- BrokenPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for ``x_break``, :math:`\\alpha_1` for ``alpha_1``, :math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for ``delta``): .. math:: f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1} \\left\\{ \\frac{1}{2} \\left[ 1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta} \\right] \\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta} The change of slope occurs between the values :math:`x_1` and :math:`x_2` such that: .. math:: \\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1} \\sim \\Delta At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the model is approximately a simple power law with index :math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two power laws are smoothly joined at values :math:`x_1 < x < x_2`, hence the :math:`\\Delta` parameter sets the "smoothness" of the slope change. The ``delta`` parameter is bounded to values greater than 1e-3 (corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid overflow errors. The ``amplitude`` parameter is bounded to positive values since this model is typically used to represent positive quantities. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling import models x = np.logspace(0.7, 2.3, 500) f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20, alpha_1=-2, alpha_2=2) plt.figure() plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2") f.delta = 0.5 plt.loglog(x, f(x), '--', label='delta=0.5') f.delta = 0.3 plt.loglog(x, f(x), '-.', label='delta=0.3') f.delta = 0.1 plt.loglog(x, f(x), label='delta=0.1') plt.axis([x.min(), x.max(), 0.1, 1.1]) plt.legend(loc='lower center') plt.grid(True) plt.show() """ amplitude = Parameter( default=1, min=0, description="Peak value at break point", mag=True ) x_break = Parameter(default=1, description="Break point") alpha_1 = Parameter(default=-2, description="Power law index before break point") alpha_2 = Parameter(default=2, description="Power law index after break point") delta = Parameter(default=1, min=1.0e-3, description="Smoothness Parameter") @amplitude.validator def amplitude(self, value): if np.any(value <= 0): raise InputParameterError("amplitude parameter must be > 0") @delta.validator def delta(self, value): if np.any(value < 0.001): raise InputParameterError("delta parameter must be >= 0.001") @staticmethod def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law model function""" # Pre-calculate `x/x_b` xx = x / x_break # Initialize the return value f = np.zeros_like(xx, subok=False) if isinstance(amplitude, Quantity): return_unit = amplitude.unit amplitude = amplitude.value else: return_unit = None # The quantity `t = (x / x_b)^(1 / delta)` can become quite # large. To avoid overflow errors we will start by calculating # its natural logarithm: logt = np.log(xx) / delta # When `t >> 1` or `t << 1` we don't actually need to compute # the `t` value since the main formula (see docstring) can be # significantly simplified by neglecting `1` or `t` # respectively. In the following we will check whether `t` is # much greater, much smaller, or comparable to 1 by comparing # the `logt` value with an appropriate threshold. threshold = 30 # corresponding to exp(30) ~ 1e13 i = logt > threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_2`. f[i] = ( amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta)) ) i = logt < -threshold if i.max(): # In this case the main formula reduces to a simple power # law with index `alpha_1`. f[i] = ( amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta)) ) i = np.abs(logt) <= threshold if i.max(): # In this case the `t` value is "comparable" to 1, hence we # we will evaluate the whole formula. 
t = np.exp(logt[i]) r = (1.0 + t) / 2.0 f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta) if return_unit: return Quantity(f, unit=return_unit, copy=False, subok=True) return f @staticmethod def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta): """One dimensional smoothly broken power law derivative with respect to parameters""" # Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in # SmoothlyBrokenPowerLaw1D.evaluate) xx = x / x_break logt = np.log(xx) / delta # Initialize the return values f = np.zeros_like(xx) d_amplitude = np.zeros_like(xx) d_x_break = np.zeros_like(xx) d_alpha_1 = np.zeros_like(xx) d_alpha_2 = np.zeros_like(xx) d_delta = np.zeros_like(xx) threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate) i = logt > threshold if i.max(): f[i] = ( amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta)) ) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_2 / x_break d_alpha_1[i] = f[i] * (-delta * np.log(2)) d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2)) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = logt < -threshold if i.max(): f[i] = ( amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta)) ) d_amplitude[i] = f[i] / amplitude d_x_break[i] = f[i] * alpha_1 / x_break d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2)) d_alpha_2[i] = f[i] * delta * np.log(2) d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2)) i = np.abs(logt) <= threshold if i.max(): t = np.exp(logt[i]) r = (1.0 + t) / 2.0 f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta) d_amplitude[i] = f[i] / amplitude d_x_break[i] = ( f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2.0 / r) / x_break ) d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r)) d_alpha_2[i] = f[i] * (-delta * np.log(r)) d_delta[i] = ( f[i] * (alpha_1 - alpha_2) * (np.log(r) - t / (1.0 + t) / delta * np.log(xx[i])) ) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta] @property def input_units(self): if self.x_break.unit is None: return None return {self.inputs[0]: self.x_break.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_break": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class ExponentialCutoffPowerLaw1D(Fittable1DModel): """ One dimensional power law model with an exponential cutoff. Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index x_cutoff : float Cutoff point See Also -------- PowerLaw1D, BrokenPowerLaw1D, LogParabola1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``): .. 
math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff}) """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") x_cutoff = Parameter(default=1, description="Cutoff point") @staticmethod def evaluate(x, amplitude, x_0, alpha, x_cutoff): """One dimensional exponential cutoff power law model function""" xx = x / x_0 return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff) @staticmethod def fit_deriv(x, amplitude, x_0, alpha, x_cutoff): """ One dimensional exponential cutoff power law derivative with respect to parameters """ xx = x / x_0 xc = x / x_cutoff d_amplitude = xx ** (-alpha) * np.exp(-xc) d_x_0 = alpha * amplitude * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) d_x_cutoff = amplitude * x * d_amplitude / x_cutoff**2 return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "x_cutoff": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class LogParabola1D(Fittable1DModel): """ One dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``): .. math:: f(x) = A \\left( \\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}} """ amplitude = Parameter(default=1, description="Peak value of model") x_0 = Parameter(default=1, description="Reference point") alpha = Parameter(default=1, description="Power law index") beta = Parameter(default=0, description="Power law curvature") @staticmethod def evaluate(x, amplitude, x_0, alpha, beta): """One dimensional log parabola model function""" xx = x / x_0 exponent = -alpha - beta * np.log(xx) return amplitude * xx**exponent @staticmethod def fit_deriv(x, amplitude, x_0, alpha, beta): """One dimensional log parabola derivative with respect to parameters""" xx = x / x_0 log_xx = np.log(xx) exponent = -alpha - beta * log_xx d_amplitude = xx**exponent d_beta = -amplitude * d_amplitude * log_xx**2 d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0) d_alpha = -amplitude * d_amplitude * log_xx return [d_amplitude, d_x_0, d_alpha, d_beta] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Schechter1D(Fittable1DModel): r""" Schechter luminosity function (`Schechter 1976 <https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_), parameterized in terms of magnitudes. Parameters ---------- phi_star : float The normalization factor in units of number density. m_star : float The characteristic magnitude where the power-law form of the function cuts off. alpha : float The power law index, also known as the faint-end slope. Must not have units. 
See Also -------- PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D Notes ----- Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}` for ``m_star``, and :math:`\alpha` for ``alpha``): .. math:: n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \ [{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \ \exp{[-10^{0.4 (M^{*} - M)}]} \ dM ``phi_star`` is the normalization factor in units of number density. ``m_star`` is the characteristic magnitude where the power-law form of the function cuts off into the exponential form. ``alpha`` is the power-law index, defining the faint-end slope of the luminosity function. Examples -------- .. plot:: :include-source: from astropy.modeling.models import Schechter1D import astropy.units as u import matplotlib.pyplot as plt import numpy as np phi_star = 4.3e-4 * (u.Mpc ** -3) m_star = -20.26 alpha = -1.98 model = Schechter1D(phi_star, m_star, alpha) mag = np.linspace(-25, -17) fig, ax = plt.subplots() ax.plot(mag, model(mag)) ax.set_yscale('log') ax.set_xlim(-22.6, -17) ax.set_ylim(1.e-7, 1.e-2) ax.set_xlabel('$M_{UV}$') ax.set_ylabel('$\phi$ [mag$^{-1}$ Mpc$^{-3}]$') References ---------- .. [1] Schechter 1976; ApJ 203, 297 (https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract) .. [2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_ """ phi_star = Parameter( default=1.0, description="Normalization factor in units of number density" ) m_star = Parameter(default=-20.0, description="Characteristic magnitude", mag=True) alpha = Parameter(default=-1.0, description="Faint-end slope") @staticmethod def _factor(magnitude, m_star): factor_exp = magnitude - m_star if isinstance(factor_exp, Quantity): if factor_exp.unit == mag: factor_exp = Magnitude(factor_exp.value, unit=mag) return factor_exp.to(dimensionless_unscaled) else: raise UnitsError( "The units of magnitude and m_star must be a magnitude" ) else: return 10 ** (-0.4 * factor_exp) def evaluate(self, mag, phi_star, m_star, alpha): """Schechter luminosity function model function.""" factor = self._factor(mag, m_star) return 0.4 * np.log(10) * phi_star * factor ** (alpha + 1) * np.exp(-factor) def fit_deriv(self, mag, phi_star, m_star, alpha): """ Schechter luminosity function derivative with respect to parameters. """ factor = self._factor(mag, m_star) d_phi_star = 0.4 * np.log(10) * factor ** (alpha + 1) * np.exp(-factor) func = phi_star * d_phi_star d_m_star = (alpha + 1) * 0.4 * np.log(10) * func - ( 0.4 * np.log(10) * func * factor ) d_alpha = func * np.log(factor) return [d_phi_star, d_m_star, d_alpha] @property def input_units(self): if self.m_star.unit is None: return None return {self.inputs[0]: self.m_star.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "m_star": inputs_unit[self.inputs[0]], "phi_star": outputs_unit[self.outputs[0]], }
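

# --- Illustrative usage sketch (not part of the module above) ---
# A minimal, hedged comparison of a plain power law and its exponentially
# cut-off variant defined in this file, showing how the cutoff suppresses
# the high-x tail. The parameter values are arbitrary demo inputs.
if __name__ == "__main__":  # pragma: no cover
    pl = PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
    ecpl = ExponentialCutoffPowerLaw1D(
        amplitude=10.0, x_0=1.0, alpha=2.0, x_cutoff=50.0
    )

    x = np.logspace(0, 3, 4)   # 1, 10, 100, 1000
    print(pl(x))
    print(ecpl(x))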
8924ee41df6e158985c8efe96fc0d7d7bd821b93602a43336388468ef5355e5d
# Licensed under a 3-clause BSD style license - see LICENSE.rst import copy import os import queue import select import socket import threading import time import uuid import warnings import xmlrpc.client as xmlrpc from urllib.parse import urlunparse from astropy import log from .constants import SAMP_STATUS_OK, __profile_version__ from .errors import SAMPHubError, SAMPProxyError, SAMPWarning from .lockfile_helpers import create_lock_file, read_lockfile from .standard_profile import ThreadingXMLRPCServer from .utils import ServerProxyPool, _HubAsClient, internet_on from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog __all__ = ["SAMPHubServer", "WebProfileDialog"] __doctest_skip__ = [".", "SAMPHubServer.*"] class SAMPHubServer: """ SAMP Hub Server. Parameters ---------- secret : str, optional The secret code to use for the SAMP lockfile. If none is is specified, the :func:`uuid.uuid1` function is used to generate one. addr : str, optional Listening address (or IP). This defaults to 127.0.0.1 if the internet is not reachable, otherwise it defaults to the host name. port : int, optional Listening XML-RPC server socket port. If left set to 0 (the default), the operating system will select a free port. lockfile : str, optional Custom lockfile name. timeout : int, optional Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically stops after an inactivity period longer than ``timeout`` seconds. By default ``timeout`` is set to 0 (Hub never expires). client_timeout : int, optional Client inactivity timeout. If ``client_timeout > 0`` then the Hub automatically unregisters the clients which result inactive for a period longer than ``client_timeout`` seconds. By default ``client_timeout`` is set to 0 (clients never expire). mode : str, optional Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub runs using the standard ``.samp`` lock-file, having a single instance for user desktop session. Otherwise, if ``mode`` is ``'multiple'``, then the Hub runs using a non-standard lock-file, placed in ``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where ``<UUID>`` is a unique UUID assigned to the hub. label : str, optional A string used to label the Hub with a human readable name. This string is written in the lock-file assigned to the ``hub.label`` token. web_profile : bool, optional Enables or disables the Web Profile support. web_profile_dialog : class, optional Allows a class instance to be specified using ``web_profile_dialog`` to replace the terminal-based message with e.g. a GUI pop-up. Two `queue.Queue` instances will be added to the instance as attributes ``queue_request`` and ``queue_result``. When a request is received via the ``queue_request`` queue, the pop-up should be displayed, and a value of `True` or `False` should be added to ``queue_result`` depending on whether the user accepted or refused the connection. web_port : int, optional The port to use for web SAMP. This should not be changed except for testing purposes, since web SAMP should always use port 21012. pool_size : int, optional The number of socket connections opened to communicate with the clients. 
""" def __init__( self, secret=None, addr=None, port=0, lockfile=None, timeout=0, client_timeout=0, mode="single", label="", web_profile=True, web_profile_dialog=None, web_port=21012, pool_size=20, ): # Generate random ID for the hub self._id = str(uuid.uuid1()) # General settings self._is_running = False self._customlockfilename = lockfile self._lockfile = None self._addr = addr self._port = port self._mode = mode self._label = label self._timeout = timeout self._client_timeout = client_timeout self._pool_size = pool_size # Web profile specific attributes self._web_profile = web_profile self._web_profile_dialog = web_profile_dialog self._web_port = web_port self._web_profile_server = None self._web_profile_callbacks = {} self._web_profile_requests_queue = None self._web_profile_requests_result = None self._web_profile_requests_semaphore = None self._host_name = "127.0.0.1" if internet_on(): try: self._host_name = socket.getfqdn() socket.getaddrinfo(self._addr or self._host_name, self._port or 0) except OSError: self._host_name = "127.0.0.1" # Threading stuff self._thread_lock = threading.Lock() self._thread_run = None self._thread_hub_timeout = None self._thread_client_timeout = None self._launched_threads = [] # Variables for timeout testing: self._last_activity_time = None self._client_activity_time = {} # Hub message id counter, used to create hub msg ids self._hub_msg_id_counter = 0 # Hub secret code self._hub_secret_code_customized = secret self._hub_secret = self._create_secret_code() # Hub public id (as SAMP client) self._hub_public_id = "" # Client ids # {private_key: (public_id, timestamp)} self._private_keys = {} # Metadata per client # {private_key: metadata} self._metadata = {} # List of subscribed clients per MType # {mtype: private_key list} self._mtype2ids = {} # List of subscribed MTypes per client # {private_key: mtype list} self._id2mtypes = {} # List of XML-RPC addresses per client # {public_id: (XML-RPC address, ServerProxyPool instance)} self._xmlrpc_endpoints = {} # Synchronous message id heap self._sync_msg_ids_heap = {} # Public ids counter self._client_id_counter = -1 @property def id(self): """ The unique hub ID. 
""" return self._id def _register_standard_api(self, server): # Standard Profile only operations server.register_function(self._ping, "samp.hub.ping") server.register_function( self._set_xmlrpc_callback, "samp.hub.setXmlrpcCallback" ) # Standard API operations server.register_function(self._register, "samp.hub.register") server.register_function(self._unregister, "samp.hub.unregister") server.register_function(self._declare_metadata, "samp.hub.declareMetadata") server.register_function(self._get_metadata, "samp.hub.getMetadata") server.register_function( self._declare_subscriptions, "samp.hub.declareSubscriptions" ) server.register_function(self._get_subscriptions, "samp.hub.getSubscriptions") server.register_function( self._get_registered_clients, "samp.hub.getRegisteredClients" ) server.register_function( self._get_subscribed_clients, "samp.hub.getSubscribedClients" ) server.register_function(self._notify, "samp.hub.notify") server.register_function(self._notify_all, "samp.hub.notifyAll") server.register_function(self._call, "samp.hub.call") server.register_function(self._call_all, "samp.hub.callAll") server.register_function(self._call_and_wait, "samp.hub.callAndWait") server.register_function(self._reply, "samp.hub.reply") def _register_web_profile_api(self, server): # Web Profile methods like Standard Profile server.register_function(self._ping, "samp.webhub.ping") server.register_function(self._unregister, "samp.webhub.unregister") server.register_function(self._declare_metadata, "samp.webhub.declareMetadata") server.register_function(self._get_metadata, "samp.webhub.getMetadata") server.register_function( self._declare_subscriptions, "samp.webhub.declareSubscriptions" ) server.register_function( self._get_subscriptions, "samp.webhub.getSubscriptions" ) server.register_function( self._get_registered_clients, "samp.webhub.getRegisteredClients" ) server.register_function( self._get_subscribed_clients, "samp.webhub.getSubscribedClients" ) server.register_function(self._notify, "samp.webhub.notify") server.register_function(self._notify_all, "samp.webhub.notifyAll") server.register_function(self._call, "samp.webhub.call") server.register_function(self._call_all, "samp.webhub.callAll") server.register_function(self._call_and_wait, "samp.webhub.callAndWait") server.register_function(self._reply, "samp.webhub.reply") # Methods particularly for Web Profile server.register_function(self._web_profile_register, "samp.webhub.register") server.register_function( self._web_profile_allowReverseCallbacks, "samp.webhub.allowReverseCallbacks" ) server.register_function( self._web_profile_pullCallbacks, "samp.webhub.pullCallbacks" ) def _start_standard_server(self): self._server = ThreadingXMLRPCServer( (self._addr or self._host_name, self._port or 0), log, logRequests=False, allow_none=True, ) prot = "http" self._port = self._server.socket.getsockname()[1] addr = f"{self._addr or self._host_name}:{self._port}" self._url = urlunparse((prot, addr, "", "", "", "")) self._server.register_introspection_functions() self._register_standard_api(self._server) def _start_web_profile_server(self): self._web_profile_requests_queue = queue.Queue(1) self._web_profile_requests_result = queue.Queue(1) self._web_profile_requests_semaphore = queue.Queue(1) if self._web_profile_dialog is not None: # TODO: Some sort of duck-typing on the web_profile_dialog object self._web_profile_dialog.queue_request = self._web_profile_requests_queue self._web_profile_dialog.queue_result = self._web_profile_requests_result try: 
self._web_profile_server = WebProfileXMLRPCServer( ("localhost", self._web_port), log, logRequests=False, allow_none=True ) self._web_port = self._web_profile_server.socket.getsockname()[1] self._web_profile_server.register_introspection_functions() self._register_web_profile_api(self._web_profile_server) log.info("Hub set to run with Web Profile support enabled.") except OSError: log.warning( "Port {} already in use. Impossible to run the " "Hub with Web Profile support.".format(self._web_port), SAMPWarning, ) self._web_profile = False # Cleanup self._web_profile_requests_queue = None self._web_profile_requests_result = None self._web_profile_requests_semaphore = None def _launch_thread(self, group=None, target=None, name=None, args=None): # Remove inactive threads remove = [] for t in self._launched_threads: if not t.is_alive(): remove.append(t) for t in remove: self._launched_threads.remove(t) # Start new thread t = threading.Thread(group=group, target=target, name=name, args=args) t.start() # Add to list of launched threads self._launched_threads.append(t) def _join_launched_threads(self, timeout=None): for t in self._launched_threads: t.join(timeout=timeout) def _timeout_test_hub(self): if self._timeout == 0: return last = time.time() while self._is_running: time.sleep(0.05) # keep this small to check _is_running often now = time.time() if now - last > 1.0: with self._thread_lock: if self._last_activity_time is not None: if now - self._last_activity_time >= self._timeout: warnings.warn( "Timeout expired, Hub is shutting down!", SAMPWarning ) self.stop() return last = now def _timeout_test_client(self): if self._client_timeout == 0: return last = time.time() while self._is_running: time.sleep(0.05) # keep this small to check _is_running often now = time.time() if now - last > 1.0: for private_key in self._client_activity_time.keys(): if ( now - self._client_activity_time[private_key] > self._client_timeout and private_key != self._hub_private_key ): warnings.warn( f"Client {private_key} timeout expired!", SAMPWarning ) self._notify_disconnection(private_key) self._unregister(private_key) last = now def _hub_as_client_request_handler(self, method, args): if method == "samp.client.receiveCall": return self._receive_call(*args) elif method == "samp.client.receiveNotification": return self._receive_notification(*args) elif method == "samp.client.receiveResponse": return self._receive_response(*args) elif method == "samp.app.ping": return self._ping(*args) def _setup_hub_as_client(self): hub_metadata = { "samp.name": "Astropy SAMP Hub", "samp.description.text": self._label, "author.name": "The Astropy Collaboration", "samp.documentation.url": "https://docs.astropy.org/en/stable/samp", "samp.icon.url": self._url + "/samp/icon", } result = self._register(self._hub_secret) self._hub_public_id = result["samp.self-id"] self._hub_private_key = result["samp.private-key"] self._set_xmlrpc_callback(self._hub_private_key, self._url) self._declare_metadata(self._hub_private_key, hub_metadata) self._declare_subscriptions( self._hub_private_key, {"samp.app.ping": {}, "x-samp.query.by-meta": {}} ) def start(self, wait=False): """ Start the current SAMP Hub instance and create the lock file. Hub start-up can be blocking or non blocking depending on the ``wait`` parameter. Parameters ---------- wait : bool If `True` then the Hub process is joined with the caller, blocking the code flow. Usually `True` option is used to run a stand-alone Hub in an executable script. 
If `False` (default), then the Hub process runs in a separated thread. `False` is usually used in a Python shell. """ if self._is_running: raise SAMPHubError("Hub is already running") if self._lockfile is not None: raise SAMPHubError("Hub is not running but lockfile is set") if self._web_profile: self._start_web_profile_server() self._start_standard_server() self._lockfile = create_lock_file( lockfilename=self._customlockfilename, mode=self._mode, hub_id=self.id, hub_params=self.params, ) self._update_last_activity_time() self._setup_hub_as_client() self._start_threads() log.info("Hub started") if wait and self._is_running: self._thread_run.join() self._thread_run = None @property def params(self): """ The hub parameters (which are written to the logfile) """ params = {} # Keys required by standard profile params["samp.secret"] = self._hub_secret params["samp.hub.xmlrpc.url"] = self._url params["samp.profile.version"] = __profile_version__ # Custom keys params["hub.id"] = self.id params["hub.label"] = self._label or f"Hub {self.id}" return params def _start_threads(self): self._thread_run = threading.Thread(target=self._serve_forever) self._thread_run.daemon = True if self._timeout > 0: self._thread_hub_timeout = threading.Thread( target=self._timeout_test_hub, name="Hub timeout test" ) self._thread_hub_timeout.daemon = True else: self._thread_hub_timeout = None if self._client_timeout > 0: self._thread_client_timeout = threading.Thread( target=self._timeout_test_client, name="Client timeout test" ) self._thread_client_timeout.daemon = True else: self._thread_client_timeout = None self._is_running = True self._thread_run.start() if self._thread_hub_timeout is not None: self._thread_hub_timeout.start() if self._thread_client_timeout is not None: self._thread_client_timeout.start() def _create_secret_code(self): if self._hub_secret_code_customized is not None: return self._hub_secret_code_customized else: return str(uuid.uuid1()) def stop(self): """ Stop the current SAMP Hub instance and delete the lock file. """ if not self._is_running: return log.info("Hub is stopping...") self._notify_shutdown() self._is_running = False if self._lockfile and os.path.isfile(self._lockfile): lockfiledict = read_lockfile(self._lockfile) if lockfiledict["samp.secret"] == self._hub_secret: os.remove(self._lockfile) self._lockfile = None # Reset variables # TODO: What happens if not all threads are stopped after timeout? self._join_all_threads(timeout=10.0) self._hub_msg_id_counter = 0 self._hub_secret = self._create_secret_code() self._hub_public_id = "" self._metadata = {} self._private_keys = {} self._mtype2ids = {} self._id2mtypes = {} self._xmlrpc_endpoints = {} self._last_activity_time = None log.info("Hub stopped.") def _join_all_threads(self, timeout=None): # In some cases, ``stop`` may be called from some of the sub-threads, # so we just need to make sure that we don't try and shut down the # calling thread. 
current_thread = threading.current_thread() if self._thread_run is not current_thread: self._thread_run.join(timeout=timeout) if not self._thread_run.is_alive(): self._thread_run = None if ( self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread ): self._thread_hub_timeout.join(timeout=timeout) if not self._thread_hub_timeout.is_alive(): self._thread_hub_timeout = None if ( self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread ): self._thread_client_timeout.join(timeout=timeout) if not self._thread_client_timeout.is_alive(): self._thread_client_timeout = None self._join_launched_threads(timeout=timeout) @property def is_running(self): """Return an information concerning the Hub running status. Returns ------- running : bool Is the hub running? """ return self._is_running def _serve_forever(self): while self._is_running: try: read_ready = select.select([self._server.socket], [], [], 0.01)[0] except OSError as exc: warnings.warn( f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning ) else: if read_ready: self._server.handle_request() if self._web_profile: # We now check if there are any connection requests from the # web profile, and if so, we initialize the pop-up. if self._web_profile_dialog is None: try: request = self._web_profile_requests_queue.get_nowait() except queue.Empty: pass else: web_profile_text_dialog( request, self._web_profile_requests_result ) # We now check for requests over the web profile socket, and we # also update the pop-up in case there are any changes. try: read_ready = select.select( [self._web_profile_server.socket], [], [], 0.01 )[0] except OSError as exc: warnings.warn( f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning ) else: if read_ready: self._web_profile_server.handle_request() self._server.server_close() if self._web_profile_server is not None: self._web_profile_server.server_close() def _notify_shutdown(self): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown") for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: self._notify_( self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}}, ) def _notify_register(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify( self._hub_private_key, self._private_keys[key][0], { "samp.mtype": "samp.hub.event.register", "samp.params": {"id": public_id}, }, ) def _notify_unregister(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: if key != private_key: self._notify( self._hub_private_key, self._private_keys[key][0], { "samp.mtype": "samp.hub.event.unregister", "samp.params": {"id": public_id}, }, ) def _notify_metadata(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify( self._hub_private_key, self._private_keys[key][0], { "samp.mtype": "samp.hub.event.metadata", "samp.params": { "id": public_id, "metadata": self._metadata[private_key], }, }, ) def 
_notify_subscriptions(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: self._notify( self._hub_private_key, self._private_keys[key][0], { "samp.mtype": "samp.hub.event.subscriptions", "samp.params": { "id": public_id, "subscriptions": self._id2mtypes[private_key], }, }, ) def _notify_disconnection(self, private_key): def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message): endpoint.samp.client.receiveNotification( private_key, hub_public_id, message ) msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect") public_id = self._private_keys[private_key][0] endpoint = self._xmlrpc_endpoints[public_id][1] for mtype in msubs: if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]: log.debug(f"notify disconnection to {public_id}") self._launch_thread( target=_xmlrpc_call_disconnect, args=( endpoint, private_key, self._hub_public_id, { "samp.mtype": "samp.hub.disconnect", "samp.params": {"reason": "Timeout expired!"}, }, ), ) def _ping(self): self._update_last_activity_time() log.debug("ping") return "1" def _query_by_metadata(self, key, value): public_id_list = [] for private_id in self._metadata: if key in self._metadata[private_id]: if self._metadata[private_id][key] == value: public_id_list.append(self._private_keys[private_id][0]) return public_id_list def _set_xmlrpc_callback(self, private_key, xmlrpc_addr): self._update_last_activity_time(private_key) if private_key in self._private_keys: if private_key == self._hub_private_key: public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = ( xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler), ) return "" # Dictionary stored with the public id log.debug(f"set_xmlrpc_callback: {private_key} {xmlrpc_addr}") server_proxy_pool = None server_proxy_pool = ServerProxyPool( self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1 ) public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool) else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") return "" def _perform_standard_register(self): with self._thread_lock: private_key, public_id = self._get_new_ids() self._private_keys[private_key] = (public_id, time.time()) self._update_last_activity_time(private_key) self._notify_register(private_key) log.debug(f"register: private-key = {private_key} and self-id = {public_id}") return { "samp.self-id": public_id, "samp.private-key": private_key, "samp.hub-id": self._hub_public_id, } def _register(self, secret): self._update_last_activity_time() if secret == self._hub_secret: return self._perform_standard_register() else: # return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""} raise SAMPProxyError(7, "Bad secret code") def _get_new_ids(self): private_key = str(uuid.uuid1()) self._client_id_counter += 1 public_id = "cli#hub" if self._client_id_counter > 0: public_id = f"cli#{self._client_id_counter}" return private_key, public_id def _unregister(self, private_key): self._update_last_activity_time() public_key = "" self._notify_unregister(private_key) with self._thread_lock: if private_key in self._private_keys: public_key = self._private_keys[private_key][0] del self._private_keys[private_key] else: return "" if private_key in self._metadata: del self._metadata[private_key] if private_key in self._id2mtypes: 
del self._id2mtypes[private_key] for mtype in self._mtype2ids.keys(): if private_key in self._mtype2ids[mtype]: self._mtype2ids[mtype].remove(private_key) if public_key in self._xmlrpc_endpoints: del self._xmlrpc_endpoints[public_key] if private_key in self._client_activity_time: del self._client_activity_time[private_key] if self._web_profile: if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] self._web_profile_server.remove_client(private_key) log.debug(f"unregister {public_key} ({private_key})") return "" def _declare_metadata(self, private_key, metadata): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug( "declare_metadata: private-key = {} metadata = {}".format( private_key, str(metadata) ) ) self._metadata[private_key] = metadata self._notify_metadata(private_key) else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") return "" def _get_metadata(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) log.debug( "get_metadata: private-key = {} client-id = {}".format( private_key, client_id ) ) if client_private_key is not None: if client_private_key in self._metadata: log.debug(f"--> metadata = {self._metadata[client_private_key]}") return self._metadata[client_private_key] else: return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _declare_subscriptions(self, private_key, mtypes): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug( "declare_subscriptions: private-key = {} mtypes = {}".format( private_key, str(mtypes) ) ) # remove subscription to previous mtypes if private_key in self._id2mtypes: prev_mtypes = self._id2mtypes[private_key] for mtype in prev_mtypes: try: self._mtype2ids[mtype].remove(private_key) except ValueError: # private_key is not in list pass self._id2mtypes[private_key] = copy.deepcopy(mtypes) # remove duplicated MType for wildcard overwriting original_mtypes = copy.deepcopy(mtypes) for mtype in original_mtypes: if mtype.endswith("*"): for mtype2 in original_mtypes: if mtype2.startswith(mtype[:-1]) and mtype2 != mtype: if mtype2 in mtypes: del mtypes[mtype2] log.debug( "declare_subscriptions: subscriptions accepted from {} => {}".format( private_key, str(mtypes) ) ) for mtype in mtypes: if mtype in self._mtype2ids: if private_key not in self._mtype2ids[mtype]: self._mtype2ids[mtype].append(private_key) else: self._mtype2ids[mtype] = [private_key] self._notify_subscriptions(private_key) else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") return "" def _get_subscriptions(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) if client_private_key is not None: if client_private_key in self._id2mtypes: log.debug( "get_subscriptions: client-id = {} mtypes = {}".format( client_id, str(self._id2mtypes[client_private_key]) ) ) return self._id2mtypes[client_private_key] else: log.debug( "get_subscriptions: client-id = {} mtypes = missing".format( client_id ) ) return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _get_registered_clients(self, private_key): 
self._update_last_activity_time(private_key) if private_key in self._private_keys: reg_clients = [] for pkey in self._private_keys.keys(): if pkey != private_key: reg_clients.append(self._private_keys[pkey][0]) log.debug( "get_registered_clients: private_key = {} clients = {}".format( private_key, reg_clients ) ) return reg_clients else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _get_subscribed_clients(self, private_key, mtype): self._update_last_activity_time(private_key) if private_key in self._private_keys: sub_clients = {} for pkey in self._private_keys.keys(): if pkey != private_key and self._is_subscribed(pkey, mtype): sub_clients[self._private_keys[pkey][0]] = {} log.debug( "get_subscribed_clients: private_key = {} mtype = {} " "clients = {}".format(private_key, mtype, sub_clients) ) return sub_clients else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") @staticmethod def get_mtype_subtypes(mtype): """ Return a list containing all the possible wildcarded subtypes of MType. Parameters ---------- mtype : str MType to be parsed. Returns ------- types : list List of subtypes Examples -------- >>> from astropy.samp import SAMPHubServer >>> SAMPHubServer.get_mtype_subtypes("samp.app.ping") ['samp.app.ping', 'samp.app.*', 'samp.*', '*'] """ subtypes = [] msubs = mtype.split(".") indexes = list(range(len(msubs))) indexes.reverse() indexes.append(-1) for i in indexes: tmp_mtype = ".".join(msubs[: i + 1]) if tmp_mtype != mtype: if tmp_mtype != "": tmp_mtype = tmp_mtype + ".*" else: tmp_mtype = "*" subtypes.append(tmp_mtype) return subtypes def _is_subscribed(self, private_key, mtype): subscribed = False msubs = SAMPHubServer.get_mtype_subtypes(mtype) for msub in msubs: if msub in self._mtype2ids: if private_key in self._mtype2ids[msub]: subscribed = True return subscribed def _notify(self, private_key, recipient_id, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if not ( self._is_subscribed( self._public_id_to_private_key(recipient_id), message["samp.mtype"] ) ): raise SAMPProxyError( 2, "Client {} not subscribed to MType {}".format( recipient_id, message["samp.mtype"] ), ) self._launch_thread( target=self._notify_, args=(private_key, recipient_id, message) ) return {} else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _notify_(self, sender_private_key, recipient_public_id, message): if sender_private_key not in self._private_keys: return sender_public_id = self._private_keys[sender_private_key][0] try: log.debug( "notify {} from {} to {}".format( message["samp.mtype"], sender_public_id, recipient_public_id ) ) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, message) samp_method_name = "receiveNotification" self._retry_method( recipient_private_key, recipient_public_id, samp_method_name, arg_params ) except Exception as exc: warnings.warn( "{} notification from client {} to client {} failed [{}]".format( message["samp.mtype"], sender_public_id, recipient_public_id, exc ), SAMPWarning, ) def _notify_all(self, private_key, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError(3, "samp.mtype keyword is missing") recipient_ids = self._notify_all_(private_key, message) return recipient_ids else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _notify_all_(self, 
sender_private_key, message): recipient_ids = [] msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _recipient_id = self._private_keys[key][0] recipient_ids.append(_recipient_id) self._launch_thread( target=self._notify, args=(sender_private_key, _recipient_id, message), ) return recipient_ids def _call(self, private_key, recipient_id, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if not ( self._is_subscribed( self._public_id_to_private_key(recipient_id), message["samp.mtype"] ) ): raise SAMPProxyError( 2, "Client {} not subscribed to MType {}".format( recipient_id, message["samp.mtype"] ), ) public_id = self._private_keys[private_key][0] msg_id = self._get_new_hub_msg_id(public_id, msg_tag) self._launch_thread( target=self._call_, args=(private_key, public_id, recipient_id, msg_id, message), ) return msg_id else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _call_( self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message ): if sender_private_key not in self._private_keys: return try: log.debug( "call {} from {} to {} ({})".format( msg_id.split(";;")[0], sender_public_id, recipient_public_id, message["samp.mtype"], ) ) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, msg_id, message) samp_methodName = "receiveCall" self._retry_method( recipient_private_key, recipient_public_id, samp_methodName, arg_params ) except Exception as exc: warnings.warn( "{} call {} from client {} to client {} failed [{},{}]".format( message["samp.mtype"], msg_id.split(";;")[0], sender_public_id, recipient_public_id, type(exc), exc, ), SAMPWarning, ) def _call_all(self, private_key, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError( 3, "samp.mtype keyword is missing in message tagged as {}".format( msg_tag ), ) public_id = self._private_keys[private_key][0] msg_id = self._call_all_(private_key, public_id, msg_tag, message) return msg_id else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message): msg_id = {} msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag) receiver_public_id = self._private_keys[key][0] msg_id[receiver_public_id] = _msg_id self._launch_thread( target=self._call_, args=( sender_private_key, sender_public_id, receiver_public_id, _msg_id, message, ), ) return msg_id def _call_and_wait(self, private_key, recipient_id, message, timeout): self._update_last_activity_time(private_key) if private_key in self._private_keys: timeout = int(timeout) now = time.time() response = {} msg_id = self._call(private_key, recipient_id, "samp::sync::call", message) self._sync_msg_ids_heap[msg_id] = None while self._is_running: if 0 < timeout <= time.time() - now: del self._sync_msg_ids_heap[msg_id] raise SAMPProxyError(1, "Timeout expired!") if self._sync_msg_ids_heap[msg_id] is not None: response = copy.deepcopy(self._sync_msg_ids_heap[msg_id]) del self._sync_msg_ids_heap[msg_id] break time.sleep(0.01) return response else: raise 
SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") def _reply(self, private_key, msg_id, response): """ The main method that gets called for replying. This starts up an asynchronous reply thread and returns. """ self._update_last_activity_time(private_key) if private_key in self._private_keys: self._launch_thread( target=self._reply_, args=(private_key, msg_id, response) ) else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") return {} def _reply_(self, responder_private_key, msg_id, response): if responder_private_key not in self._private_keys or not msg_id: return responder_public_id = self._private_keys[responder_private_key][0] counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split( ";;", 3 ) try: log.debug( "reply {} from {} to {}".format( counter, responder_public_id, recipient_public_id ) ) if recipient_msg_tag == "samp::sync::call": if msg_id in self._sync_msg_ids_heap.keys(): self._sync_msg_ids_heap[msg_id] = response else: recipient_private_key = self._public_id_to_private_key( recipient_public_id ) arg_params = (responder_public_id, recipient_msg_tag, response) samp_method_name = "receiveResponse" self._retry_method( recipient_private_key, recipient_public_id, samp_method_name, arg_params, ) except Exception as exc: warnings.warn( "{} reply from client {} to client {} failed [{}]".format( recipient_msg_tag, responder_public_id, recipient_public_id, exc ), SAMPWarning, ) def _retry_method( self, recipient_private_key, recipient_public_id, samp_method_name, arg_params ): """ This method is used to retry a SAMP call several times. Parameters ---------- recipient_private_key The private key of the receiver of the call recipient_public_key The public key of the receiver of the call samp_method_name : str The name of the SAMP method to call arg_params : tuple Any additional arguments to be passed to the SAMP method """ if recipient_private_key is None: raise SAMPHubError("Invalid client ID") from . 
import conf for attempt in range(conf.n_retries): if not self._is_running: time.sleep(0.01) continue try: if ( self._web_profile and recipient_private_key in self._web_profile_callbacks ): # Web Profile callback = { "samp.methodName": samp_method_name, "samp.params": arg_params, } self._web_profile_callbacks[recipient_private_key].put(callback) else: # Standard Profile hub = self._xmlrpc_endpoints[recipient_public_id][1] getattr(hub.samp.client, samp_method_name)( recipient_private_key, *arg_params ) except xmlrpc.Fault as exc: log.debug( "{} XML-RPC endpoint error (attempt {}): {}".format( recipient_public_id, attempt + 1, exc.faultString ) ) time.sleep(0.01) else: return # If we are here, then the above attempts failed error_message = ( samp_method_name + " failed after " + str(conf.n_retries) + " attempts" ) raise SAMPHubError(error_message) def _public_id_to_private_key(self, public_id): for private_key in self._private_keys.keys(): if self._private_keys[private_key][0] == public_id: return private_key return None def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id): with self._thread_lock: self._hub_msg_id_counter += 1 return "msg#{};;{};;{};;{}".format( self._hub_msg_id_counter, self._hub_public_id, sender_public_id, sender_msg_id, ) def _update_last_activity_time(self, private_key=None): with self._thread_lock: self._last_activity_time = time.time() if private_key is not None: self._client_activity_time[private_key] = time.time() def _receive_notification(self, private_key, sender_id, message): return "" def _receive_call(self, private_key, sender_id, msg_id, message): if private_key == self._hub_private_key: if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping": self._reply( self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {}}, ) elif "samp.mtype" in message and ( message["samp.mtype"] == "x-samp.query.by-meta" or message["samp.mtype"] == "samp.query.by-meta" ): ids_list = self._query_by_metadata( message["samp.params"]["key"], message["samp.params"]["value"] ) self._reply( self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}}, ) return "" else: return "" def _receive_response(self, private_key, responder_id, msg_tag, response): return "" def _web_profile_register( self, identity_info, client_address=("unknown", 0), origin="unknown" ): self._update_last_activity_time() if not client_address[0] in ["localhost", "127.0.0.1"]: raise SAMPProxyError(403, "Request of registration rejected by the Hub.") if not origin: origin = "unknown" if isinstance(identity_info, dict): # an old version of the protocol provided just a string with the app name if "samp.name" not in identity_info: raise SAMPProxyError( 403, "Request of registration rejected " "by the Hub (application name not " "provided).", ) # Red semaphore for the other threads self._web_profile_requests_semaphore.put("wait") # Set the request to be displayed for the current thread self._web_profile_requests_queue.put((identity_info, client_address, origin)) # Get the popup dialogue response response = self._web_profile_requests_result.get() # OK, semaphore green self._web_profile_requests_semaphore.get() if response: register_map = self._perform_standard_register() translator_url = "http://localhost:{}/translator/{}?ref=".format( self._web_port, register_map["samp.private-key"] ) register_map["samp.url-translator"] = translator_url self._web_profile_server.add_client(register_map["samp.private-key"]) return register_map else: raise 
SAMPProxyError(403, "Request of registration rejected by the user.") def _web_profile_allowReverseCallbacks(self, private_key, allow): self._update_last_activity_time() if private_key in self._private_keys: if allow == "0": if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] else: self._web_profile_callbacks[private_key] = queue.Queue() else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") return "" def _web_profile_pullCallbacks(self, private_key, timeout_secs): self._update_last_activity_time() if private_key in self._private_keys: callback = [] callback_queue = self._web_profile_callbacks[private_key] try: while self._is_running: item_queued = callback_queue.get_nowait() callback.append(item_queued) except queue.Empty: pass return callback else: raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.") class WebProfileDialog: """ A base class to make writing Web Profile GUI consent dialogs easier. The concrete class must: 1) Poll ``handle_queue`` periodically, using the timer services of the GUI's event loop. This function will call ``self.show_dialog`` when a request requires authorization. ``self.show_dialog`` will be given the arguments: - ``samp_name``: The name of the application making the request. - ``details``: A dictionary of details about the client making the request. - ``client``: A hostname, port pair containing the client address. - ``origin``: A string containing the origin of the request. 2) Call ``consent`` or ``reject`` based on the user's response to the dialog. """ def handle_queue(self): try: request = self.queue_request.get_nowait() except queue.Empty: # queue is set but empty pass except AttributeError: # queue has not been set yet pass else: if isinstance(request[0], str): # To support the old protocol version samp_name = request[0] else: samp_name = request[0]["samp.name"] self.show_dialog(samp_name, request[0], request[1], request[2]) def consent(self): self.queue_result.put(True) def reject(self): self.queue_result.put(False)
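The WebProfileDialog contract above is easiest to see with a concrete subclass. Below is a minimal, hypothetical console-based sketch (not part of astropy.samp): it assumes the hub has attached its request/result queues to the dialog instance, which is what handle_queue, consent and reject rely on, and it polls handle_queue from a plain loop in place of a GUI timer.

class ConsoleWebProfileDialog(WebProfileDialog):
    """Hypothetical minimal consent dialog using stdin/stdout."""

    def show_dialog(self, samp_name, details, client, origin):
        # Called by handle_queue() when a web client asks to register.
        answer = input(
            f"Allow web application '{samp_name}' (origin: {origin}) "
            "to register with the SAMP Hub? [yes/no] "
        )
        if answer.strip().lower() in ("yes", "y"):
            self.consent()
        else:
            self.reject()

    def poll(self, hub):
        # Stand-in for a GUI timer: check for pending requests while the
        # hub is running.
        while hub.is_running:
            self.handle_queue()
            time.sleep(0.1)

An instance of such a dialog would typically be handed to the hub (for example via the web_profile_dialog argument of SAMPHubServer), which then feeds registration requests to it through the queues.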
23651ba10013bcb227fcf1724289eab76bfe0eb0cf3c01c0ade0e081d822e8cc
# Licensed under a 3-clause BSD style license - see LICENSE.rst # TODO: this file should be refactored to use a more thread-safe and # race-condition-safe lockfile mechanism. import datetime import os import stat import warnings import xmlrpc.client as xmlrpc from contextlib import suppress from urllib.parse import urlparse from astropy import log from astropy.config.paths import _find_home from astropy.utils.data import get_readable_fileobj from .errors import SAMPHubError, SAMPWarning def read_lockfile(lockfilename): """ Read in the lockfile given by ``lockfilename`` into a dictionary. """ # lockfilename may be a local file or a remote URL, but # get_readable_fileobj takes care of this. lockfiledict = {} with get_readable_fileobj(lockfilename) as f: for line in f: if not line.startswith("#"): kw, val = line.split("=") lockfiledict[kw.strip()] = val.strip() return lockfiledict def write_lockfile(lockfilename, lockfiledict): lockfile = open(lockfilename, "w") lockfile.close() os.chmod(lockfilename, stat.S_IREAD + stat.S_IWRITE) lockfile = open(lockfilename, "w") now_iso = datetime.datetime.now().isoformat() lockfile.write(f"# SAMP lockfile written on {now_iso}\n") lockfile.write("# Standard Profile required keys\n") for key, value in lockfiledict.items(): lockfile.write(f"{key}={value}\n") lockfile.close() def create_lock_file(lockfilename=None, mode=None, hub_id=None, hub_params=None): # Remove lock-files of dead hubs remove_garbage_lock_files() lockfiledir = "" # CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE if "SAMP_HUB" in os.environ: # For the time being I assume just the std profile supported. if os.environ["SAMP_HUB"].startswith("std-lockurl:"): lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :] lockfile_parsed = urlparse(lockfilename) if lockfile_parsed[0] != "file": warnings.warn( "Unable to start a Hub with lockfile {}. " "Start-up process aborted.".format(lockfilename), SAMPWarning, ) return False else: lockfilename = lockfile_parsed[2] else: # If it is a fresh Hub instance if lockfilename is None: log.debug("Running mode: " + mode) if mode == "single": lockfilename = os.path.join(_find_home(), ".samp") else: lockfiledir = os.path.join(_find_home(), ".samp-1") # If missing create .samp-1 directory try: os.mkdir(lockfiledir) except OSError: pass # directory already exists finally: os.chmod(lockfiledir, stat.S_IREAD + stat.S_IWRITE + stat.S_IEXEC) lockfilename = os.path.join(lockfiledir, f"samp-hub-{hub_id}") else: log.debug("Running mode: multiple") hub_is_running, lockfiledict = check_running_hub(lockfilename) if hub_is_running: warnings.warn( "Another SAMP Hub is already running. Start-up process aborted.", SAMPWarning, ) return False log.debug("Lock-file: " + lockfilename) write_lockfile(lockfilename, hub_params) return lockfilename def get_main_running_hub(): """ Get either the hub given by the environment variable SAMP_HUB, or the one given by the lockfile .samp in the user home directory. """ hubs = get_running_hubs() if not hubs: raise SAMPHubError("Unable to find a running SAMP Hub.") # CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE if "SAMP_HUB" in os.environ: # For the time being I assume just the std profile supported. 
if os.environ["SAMP_HUB"].startswith("std-lockurl:"): lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :] else: raise SAMPHubError("SAMP Hub profile not supported.") else: lockfilename = os.path.join(_find_home(), ".samp") return hubs[lockfilename] def get_running_hubs(): """ Return a dictionary containing the lock-file contents of all the currently running hubs (single and/or multiple mode). The dictionary format is: ``{<lock-file>: {<token-name>: <token-string>, ...}, ...}`` where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and ``{<token-string>}`` are the lock-file tokens (name and content). Returns ------- running_hubs : dict Lock-file contents of all the currently running hubs. """ hubs = {} lockfilename = "" # HUB SINGLE INSTANCE MODE # CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE if "SAMP_HUB" in os.environ: # For the time being I assume just the std profile supported. if os.environ["SAMP_HUB"].startswith("std-lockurl:"): lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:") :] else: lockfilename = os.path.join(_find_home(), ".samp") hub_is_running, lockfiledict = check_running_hub(lockfilename) if hub_is_running: hubs[lockfilename] = lockfiledict # HUB MULTIPLE INSTANCE MODE lockfiledir = "" lockfiledir = os.path.join(_find_home(), ".samp-1") if os.path.isdir(lockfiledir): for filename in os.listdir(lockfiledir): if filename.startswith("samp-hub"): lockfilename = os.path.join(lockfiledir, filename) hub_is_running, lockfiledict = check_running_hub(lockfilename) if hub_is_running: hubs[lockfilename] = lockfiledict return hubs def check_running_hub(lockfilename): """ Test whether a hub identified by ``lockfilename`` is running or not. Parameters ---------- lockfilename : str Lock-file name (path + file name) of the Hub to be tested. Returns ------- is_running : bool Whether the hub is running hub_params : dict If the hub is running this contains the parameters from the lockfile """ is_running = False lockfiledict = {} # Check whether a lockfile already exists try: lockfiledict = read_lockfile(lockfilename) except OSError: return is_running, lockfiledict if "samp.hub.xmlrpc.url" in lockfiledict: try: proxy = xmlrpc.ServerProxy( lockfiledict["samp.hub.xmlrpc.url"].replace("\\", ""), allow_none=1 ) proxy.samp.hub.ping() is_running = True except xmlrpc.ProtocolError: # There is a protocol error (e.g. for authentication required), # but the server is alive is_running = True except OSError: pass return is_running, lockfiledict def remove_garbage_lock_files(): lockfilename = "" # HUB SINGLE INSTANCE MODE lockfilename = os.path.join(_find_home(), ".samp") hub_is_running, lockfiledict = check_running_hub(lockfilename) if not hub_is_running: # If lockfilename belongs to a dead hub, then it is deleted if os.path.isfile(lockfilename): with suppress(OSError): os.remove(lockfilename) # HUB MULTIPLE INSTANCE MODE lockfiledir = os.path.join(_find_home(), ".samp-1") if os.path.isdir(lockfiledir): for filename in os.listdir(lockfiledir): if filename.startswith("samp-hub"): lockfilename = os.path.join(lockfiledir, filename) hub_is_running, lockfiledict = check_running_hub(lockfilename) if not hub_is_running: # If lockfilename belongs to a dead hub, then it is deleted if os.path.isfile(lockfilename): with suppress(OSError): os.remove(lockfilename)
c18c05ce94e03e0fe182ae7895b4e4e1d775736619bc3a5ee05033bebb1eb6cf
# Licensed under a 3-clause BSD style license - see LICENSE.rst import argparse import copy import sys import time from astropy import __version__, log from .hub import SAMPHubServer __all__ = ["hub_script"] def hub_script(timeout=0): """ This main function is executed by the ``samp_hub`` command line tool. """ parser = argparse.ArgumentParser(prog="samp_hub " + __version__) parser.add_argument( "-k", "--secret", dest="secret", metavar="CODE", help="custom secret code." ) parser.add_argument( "-d", "--addr", dest="addr", metavar="ADDR", help="listening address (or IP)." ) parser.add_argument( "-p", "--port", dest="port", metavar="PORT", type=int, help="listening port number.", ) parser.add_argument( "-f", "--lockfile", dest="lockfile", metavar="FILE", help="custom lockfile." ) parser.add_argument( "-w", "--no-web-profile", dest="web_profile", action="store_false", help="run the Hub disabling the Web Profile.", default=True, ) parser.add_argument( "-P", "--pool-size", dest="pool_size", metavar="SIZE", type=int, help="the socket connections pool size.", default=20, ) timeout_group = parser.add_argument_group( "Timeout group", "Special options to setup hub and client timeouts." "It contains a set of special options that allows to set up the Hub and " "clients inactivity timeouts, that is the Hub or client inactivity time " "interval after which the Hub shuts down or unregisters the client. " "Notification of samp.hub.disconnect MType is sent to the clients " "forcibly unregistered for timeout expiration.", ) timeout_group.add_argument( "-t", "--timeout", dest="timeout", metavar="SECONDS", help=( "set the Hub inactivity timeout in SECONDS. By default it " "is set to 0, that is the Hub never expires." ), type=int, default=0, ) timeout_group.add_argument( "-c", "--client-timeout", dest="client_timeout", metavar="SECONDS", help=( "set the client inactivity timeout in SECONDS. By default it " "is set to 0, that is the client never expires." ), type=int, default=0, ) parser.add_argument_group(timeout_group) log_group = parser.add_argument_group( "Logging options", "Additional options which allow to customize the logging output. By " "default the SAMP Hub uses the standard output and standard error " "devices to print out INFO level logging messages. Using the options " "here below it is possible to modify the logging level and also " "specify the output files where redirect the logging messages.", ) log_group.add_argument( "-L", "--log-level", dest="loglevel", metavar="LEVEL", help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).", type=str, choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"], default="INFO", ) log_group.add_argument( "-O", "--log-output", dest="logout", metavar="FILE", help="set the output file for the log messages.", default="", ) parser.add_argument_group(log_group) adv_group = parser.add_argument_group( "Advanced group", "Advanced options addressed to facilitate administrative tasks and " "allow new non-standard Hub behaviors. In particular the --label " "options is used to assign a value to hub.label token and is used to " "assign a name to the Hub instance. " "The very special --multi option allows to start a Hub in multi-instance mode. " "Multi-instance mode is a non-standard Hub behavior that enables " "multiple contemporaneous running Hubs. 
Multi-instance hubs place " "their non-standard lock-files within the <home directory>/.samp-1 " "directory naming them making use of the format: " "samp-hub-<PID>-<ID>, where PID is the Hub process ID while ID is an " "internal ID (integer).", ) adv_group.add_argument( "-l", "--label", dest="label", metavar="LABEL", help="assign a LABEL to the Hub.", default="", ) adv_group.add_argument( "-m", "--multi", dest="mode", help=( "run the Hub in multi-instance mode generating a custom " "lockfile with a random name." ), action="store_const", const="multiple", default="single", ) parser.add_argument_group(adv_group) options = parser.parse_args() try: if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"): log.setLevel(options.loglevel) if options.logout != "": context = log.log_to_file(options.logout) else: class dummy_context: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass context = dummy_context() with context: args = copy.deepcopy(options.__dict__) del args["loglevel"] del args["logout"] hub = SAMPHubServer(**args) hub.start(False) if not timeout: while hub.is_running: time.sleep(0.01) else: time.sleep(timeout) hub.stop() except KeyboardInterrupt: try: hub.stop() except NameError: pass except OSError as e: print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}") sys.exit(1) except SystemExit: pass
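Since the parsed options are passed straight through to SAMPHubServer, the command-line tool has a direct programmatic equivalent. The sketch below is illustrative only; the keyword values are examples, not recommended defaults.

from astropy.samp import SAMPHubServer

def _example_run_hub():
    hub = SAMPHubServer(
        web_profile=False,    # -w / --no-web-profile
        timeout=0,            # -t / --timeout (0 = the hub never expires)
        client_timeout=0,     # -c / --client-timeout
        mode="single",        # -m / --multi selects "multiple"
        label="example-hub",  # -l / --label
        pool_size=20,         # -P / --pool-size
    )
    hub.start(wait=True)      # block until the hub stops (e.g. Ctrl-C)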
4e1027ddcacecccdf5ffddcf2eb8d2a7b113a6ffabbff517d584a409295d1192
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Defines custom errors and exceptions used in `astropy.samp`. """ import xmlrpc.client as xmlrpc from astropy.utils.exceptions import AstropyUserWarning __all__ = ["SAMPWarning", "SAMPHubError", "SAMPClientError", "SAMPProxyError"] class SAMPWarning(AstropyUserWarning): """ SAMP-specific Astropy warning class """ class SAMPHubError(Exception): """ SAMP Hub exception. """ class SAMPClientError(Exception): """ SAMP Client exceptions. """ class SAMPProxyError(xmlrpc.Fault): """ SAMP Proxy Hub exception """
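Because SAMPProxyError subclasses xmlrpc.client.Fault, it carries the standard faultCode and faultString attributes that the hub uses when it rejects a call. A short hedged illustration follows; the error code and message are just examples.

try:
    raise SAMPProxyError(5, "Private-key expired or invalid.")
except SAMPProxyError as exc:
    # faultCode and faultString are inherited from xmlrpc.client.Fault
    print(exc.faultCode, exc.faultString)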
abb7f417479bd09623dbe7f750598db285f719a35bcc3f2799d435b5d2adb8f7
# Licensed under a 3-clause BSD style license - see LICENSE.rst from urllib.parse import parse_qs from urllib.request import urlopen from astropy.utils.data import get_pkg_data_contents from .standard_profile import SAMPSimpleXMLRPCRequestHandler, ThreadingXMLRPCServer __all__ = [] CROSS_DOMAIN = get_pkg_data_contents("data/crossdomain.xml") CLIENT_ACCESS_POLICY = get_pkg_data_contents("data/clientaccesspolicy.xml") class WebProfileRequestHandler(SAMPSimpleXMLRPCRequestHandler): """ Handler of XMLRPC requests performed through the Web Profile. """ def _send_CORS_header(self): if self.headers.get("Origin") is not None: method = self.headers.get("Access-Control-Request-Method") if method and self.command == "OPTIONS": # Preflight method self.send_header("Content-Length", "0") self.send_header( "Access-Control-Allow-Origin", self.headers.get("Origin") ) self.send_header("Access-Control-Allow-Methods", method) self.send_header("Access-Control-Allow-Headers", "Content-Type") self.send_header("Access-Control-Allow-Credentials", "true") else: # Simple method self.send_header( "Access-Control-Allow-Origin", self.headers.get("Origin") ) self.send_header("Access-Control-Allow-Headers", "Content-Type") self.send_header("Access-Control-Allow-Credentials", "true") def end_headers(self): self._send_CORS_header() SAMPSimpleXMLRPCRequestHandler.end_headers(self) def _serve_cross_domain_xml(self): cross_domain = False if self.path == "/crossdomain.xml": # Adobe standard response = CROSS_DOMAIN self.send_response(200, "OK") self.send_header("Content-Type", "text/x-cross-domain-policy") self.send_header("Content-Length", f"{len(response)}") self.end_headers() self.wfile.write(response.encode("utf-8")) self.wfile.flush() cross_domain = True elif self.path == "/clientaccesspolicy.xml": # Microsoft standard response = CLIENT_ACCESS_POLICY self.send_response(200, "OK") self.send_header("Content-Type", "text/xml") self.send_header("Content-Length", f"{len(response)}") self.end_headers() self.wfile.write(response.encode("utf-8")) self.wfile.flush() cross_domain = True return cross_domain def do_POST(self): if self._serve_cross_domain_xml(): return return SAMPSimpleXMLRPCRequestHandler.do_POST(self) def do_HEAD(self): if not self.is_http_path_valid(): self.report_404() return if self._serve_cross_domain_xml(): return def do_OPTIONS(self): self.send_response(200, "OK") self.end_headers() def do_GET(self): if not self.is_http_path_valid(): self.report_404() return split_path = self.path.split("?") if split_path[0] in [f"/translator/{clid}" for clid in self.server.clients]: # Request of a file proxying urlpath = parse_qs(split_path[1]) try: proxyfile = urlopen(urlpath["ref"][0]) self.send_response(200, "OK") self.end_headers() self.wfile.write(proxyfile.read()) proxyfile.close() except OSError: self.report_404() return if self._serve_cross_domain_xml(): return def is_http_path_valid(self): valid_paths = ["/clientaccesspolicy.xml", "/crossdomain.xml"] + [ f"/translator/{clid}" for clid in self.server.clients ] return self.path.split("?")[0] in valid_paths class WebProfileXMLRPCServer(ThreadingXMLRPCServer): """ XMLRPC server supporting the SAMP Web Profile. 
""" def __init__( self, addr, log=None, requestHandler=WebProfileRequestHandler, logRequests=True, allow_none=True, encoding=None, ): self.clients = [] ThreadingXMLRPCServer.__init__( self, addr, log, requestHandler, logRequests, allow_none, encoding ) def add_client(self, client_id): self.clients.append(client_id) def remove_client(self, client_id): try: self.clients.remove(client_id) except ValueError: # No warning here because this method gets called for all clients, # not just web clients, and we expect it to fail for non-web # clients. pass def web_profile_text_dialog(request, queue): samp_name = "unknown" if isinstance(request[0], str): # To support the old protocol version samp_name = request[0] else: samp_name = request[0]["samp.name"] text = f"""A Web application which declares to be Name: {samp_name} Origin: {request[2]} is requesting to be registered with the SAMP Hub. Pay attention that if you permit its registration, such application will acquire all current user privileges, like file read/write. Do you give your consent? [yes|no]""" print(text) answer = input(">>> ") queue.put(answer.lower() in ["yes", "y"])