hash | content
---|---
d8d576a153140c54c6716630bef0aec17df83337d0078bb607e55bc9642b462c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
ICRS,
CartesianDifferential,
CartesianRepresentation,
Galactic,
PrecessedGeocentric,
RadialDifferential,
SkyCoord,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_creation_frameobjs():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
for attrnm in ["ra", "dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
frame="fk5",
)
assert_quantity_allclose(sc1.ra, 1 * u.deg)
assert_quantity_allclose(sc1.dec, 2 * u.deg)
assert_quantity_allclose(sc1.pm_ra_cosdec, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc1.pm_dec, 0.1 * u.arcsec / u.kyr)
sc2 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
assert_quantity_allclose(sc2.ra, 1 * u.deg)
assert_quantity_allclose(sc2.dec, 2 * u.deg)
assert_quantity_allclose(sc2.pm_ra, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc2.pm_dec, 0.1 * u.arcsec / u.kyr)
sc3 = SkyCoord(
"1:2:3 4:5:6",
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
unit=(u.hour, u.deg),
)
assert_quantity_allclose(
sc3.ra, 1 * u.hourangle + 2 * u.arcmin * 15 + 3 * u.arcsec * 15
)
assert_quantity_allclose(sc3.dec, 4 * u.deg + 5 * u.arcmin + 6 * u.arcsec)
# might as well check with sillier units?
assert_quantity_allclose(
sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight
)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
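# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the arcsec/kyr and arcmin/fortnight expectations above are nothing
# more than unit conversions of the 0.2 mas/yr input proper motion.
def test_proper_motion_unit_conversion_sketch():
    pm = 0.2 * u.mas / u.yr
    # 1 mas/yr is exactly 1 arcsec/kyr
    assert_quantity_allclose(pm.to(u.arcsec / u.kyr), 0.2 * u.arcsec / u.kyr)
    # and the "sillier" unit above is the same quantity per fortnight
    assert_quantity_allclose(
        pm.to(u.arcmin / u.fortnight),
        1.2776637006616473e-07 * u.arcmin / u.fortnight,
    )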
def test_creation_copy_basic():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(
sc_newdiff.pm_ra_cosdec, reprepr.differentials["s"].d_lon_coslat
)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0.0, 0.0] * u.pc)
dif = CartesianDifferential([0, 100, 0.0] * u.pc / u.Myr)
rep = rep.with_differentials(dif)
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(
scope="module", params=[(False, False), (True, False), (False, True), (True, True)]
)
def sc(request):
incldist, inclrv = request.param
args = [1 * u.deg, 2 * u.deg]
kwargs = dict(pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr)
if incldist:
kwargs["distance"] = 213.4 * u.pc
if inclrv:
kwargs["radial_velocity"] = 61 * u.km / u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(
ICRS(
ra=[1] * 100 * u.deg,
dec=[2] * 100 * u.deg,
pm_ra_cosdec=np.random.randn(100) * u.mas / u.yr,
pm_dec=np.random.randn(100) * u.mas / u.yr,
)
)
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(
1 * u.deg, 2 * u.deg, pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr
)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials["s"]
sph = sc.spherical
gal = sc.galactic
if sc.data.get_name().startswith("unit") and not sc.data.differentials[
"s"
].get_name().startswith("unit"):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail(".velocity fails if there is an RV but not distance")
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to("galactic")
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail("Should fail for no-distance cases")
else:
trans = sc.transform_to(PrecessedGeocentric(equinox="B1975"))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == "Pisces"
def test_separation_3d_with_differentials():
c1 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=5 * u.mas / u.yr,
pm_dec=-7 * u.mas / u.yr,
radial_velocity=160 * u.km / u.s,
)
c2 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=105 * u.pc,
pm_ra_cosdec=15 * u.mas / u.yr,
pm_dec=-74 * u.mas / u.yr,
radial_velocity=-60 * u.km / u.s,
)
sep = c1.separation_3d(c2)
assert_quantity_allclose(sep, 5 * u.pc)
@pytest.mark.parametrize("sph_type", ["spherical", "unitspherical"])
def test_cartesian_to_spherical(sph_type):
"""Conversion to unitspherical should work, even if we lose distance."""
c = SkyCoord(
x=1 * u.kpc,
y=0 * u.kpc,
z=0 * u.kpc,
v_x=10 * u.km / u.s,
v_y=0 * u.km / u.s,
v_z=4.74 * u.km / u.s,
representation_type="cartesian",
)
c.representation_type = sph_type
assert c.ra == 0
assert c.dec == 0
assert c.pm_ra == 0
assert u.allclose(c.pm_dec, 1 * u.mas / u.yr, rtol=1e-3)
assert c.radial_velocity == 10 * u.km / u.s
if sph_type == "spherical":
assert c.distance == 1 * u.kpc
else:
assert not hasattr(c, "distance")
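# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the 4.74 km/s used above is the transverse speed corresponding to a
# proper motion of 1 mas/yr at 1 kpc (i.e. 1 AU/yr expressed in km/s), which is
# why the test expects pm_dec of about 1 mas/yr.
def test_proper_motion_distance_relation_sketch():
    pm = (4.74 * u.km / u.s / (1 * u.kpc)).to(
        u.mas / u.yr, equivalencies=u.dimensionless_angles()
    )
    assert u.allclose(pm, 1 * u.mas / u.yr, rtol=1e-3)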
@pytest.mark.parametrize(
"diff_info, diff_cls",
[
(dict(radial_velocity=[20, 30] * u.km / u.s), RadialDifferential),
(
dict(
pm_ra=[2, 3] * u.mas / u.yr,
pm_dec=[-3, -4] * u.mas / u.yr,
differential_type="unitspherical",
),
UnitSphericalDifferential,
),
(
dict(pm_ra_cosdec=[2, 3] * u.mas / u.yr, pm_dec=[-3, -4] * u.mas / u.yr),
UnitSphericalCosLatDifferential,
),
],
scope="class",
)
class TestDifferentialClassPropagation:
"""Test that going in between spherical and unit-spherical, we do not
change differential type (since both can handle the same types).
"""
def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20] * u.deg, dec=[-10, 10] * u.deg, **diff_info)
assert isinstance(sc.data, UnitSphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("spherical")
assert isinstance(sr, SphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(
ra=[10, 20] * u.deg,
dec=[-10, 10] * u.deg,
distance=1.0 * u.kpc,
**diff_info
)
assert isinstance(sc.data, SphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("unitspherical")
assert isinstance(sr, UnitSphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
|
742ee0e488f668d11984ca38719a684179d5f063d93fe18b4b1c401dbbbe70e2 | from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy import time
from astropy.constants import c
from astropy.coordinates import (
FK5,
GCRS,
ICRS,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
Galactic,
SkyCoord,
SpectralQuantity,
get_body_barycentric_posvel,
)
from astropy.coordinates.sites import get_builtin_sites
from astropy.coordinates.spectral_coordinate import (
SpectralCoord,
_apply_relativistic_doppler_shift,
)
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES
GREENWICH = get_builtin_sites()["greenwich"]
def assert_frame_allclose(
frame1,
frame2,
pos_rtol=1e-7,
pos_atol=1 * u.m,
vel_rtol=1e-7,
vel_atol=1 * u.mm / u.s,
):
# checks that:
# - the positions are equal to within some tolerance (the relative tolerance
# should be dimensionless, the absolute tolerance should be a distance).
# note that these are the tolerances *in 3d*
    # - either both or neither frame has velocities, or if one has no velocities
# the other one can have zero velocities
# - if velocities are present, they are equal to some tolerance
# Ideally this should accept both frames and SkyCoords
if hasattr(frame1, "frame"): # SkyCoord-like
frame1 = frame1.frame
if hasattr(frame2, "frame"): # SkyCoord-like
frame2 = frame2.frame
# assert (frame1.data.differentials and frame2.data.differentials or
# (not frame1.data.differentials and not frame2.data.differentials))
assert frame1.is_equivalent_frame(frame2)
frame2_in_1 = frame2.transform_to(frame1)
assert_quantity_allclose(
0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol
)
if frame1.data.differentials:
d1 = frame1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
d2 = frame2_in_1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
assert_quantity_allclose(d1.norm(d1), d1.norm(d2), rtol=vel_rtol, atol=vel_atol)
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralCoord, including for example frames,
# SkyCoords, and making sure that SpectralCoord is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(
u=0.1 * u.km,
v=0.1 * u.km,
w=0.1 * u.km,
U=9 * u.km / u.s,
V=12 * u.km / u.s,
W=7 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS()), # different frame
LSRD.transform_to(ICRS()).transform_to(Galactic()), # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(
u=9 * u.km, v=12 * u.km, w=7 * u.km, representation_type="cartesian"
)
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
# different representation
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()),
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_spectral_coord_observer_target(observer, target):
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target)
if observer is None:
assert coord.observer is None
else:
assert_frame_allclose(observer, coord.observer)
if target is None:
assert coord.target is None
else:
assert_frame_allclose(target, coord.target)
assert coord.doppler_rest is None
assert coord.doppler_convention is None
if observer is None or target is None:
assert quantity_allclose(coord.redshift, 0)
assert quantity_allclose(coord.radial_velocity, 0 * u.km / u.s)
elif any(observer is lsrd for lsrd in LSRD_EQUIV) and any(
target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV
):
assert_quantity_allclose(
coord.radial_velocity, -(274**0.5) * u.km / u.s, atol=1e-4 * u.km / u.s
)
assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9)
else:
raise NotImplementedError()
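# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the expected -sqrt(274) km/s above is simply the full LSRD speed,
# since the stationary target lies exactly along the observer's
# (U, V, W) = (9, 12, 7) km/s velocity vector, so the whole speed shows up as
# an approach (negative radial) velocity.
def test_lsrd_speed_sketch():
    assert_allclose(np.sqrt(9**2 + 12**2 + 7**2), 274**0.5)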
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralCoord object
"""
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
spec_coord1 = SpectralCoord(
[100, 200, 300] * u.nm,
observer=observer,
target=target,
doppler_convention="optical",
doppler_rest=6000 * u.AA,
)
spec_coord2 = SpectralCoord(spec_coord1)
assert spec_coord1.observer == spec_coord2.observer
assert spec_coord1.target == spec_coord2.target
assert spec_coord1.radial_velocity == spec_coord2.radial_velocity
assert spec_coord1.doppler_convention == spec_coord2.doppler_convention
assert spec_coord1.doppler_rest == spec_coord2.doppler_rest
# INTERNAL FUNCTIONS TESTS
def test_apply_relativistic_doppler_shift():
# Frequency
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c)
assert_quantity_allclose(sq2, np.sqrt(1.0 / 3.0) * u.GHz)
# Wavelength
sq3 = SpectralQuantity(500 * u.nm)
sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c)
assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm)
# Energy
sq5 = SpectralQuantity(300 * u.eV)
sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c)
assert_quantity_allclose(sq6, np.sqrt(1.0 / 3.0) * 300 * u.eV)
# Wavenumber
sq7 = SpectralQuantity(0.01 / u.micron)
sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c)
assert_quantity_allclose(sq8, np.sqrt(1.0 / 3.0) * 0.01 / u.micron)
# Velocity (doppler_convention='relativistic')
sq9 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s)
assert_quantity_allclose(sq10, 499.999666 * u.km / u.s)
assert sq10.doppler_convention == "relativistic"
    # Velocity (doppler_convention='radio')
sq11 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="radio", doppler_rest=1 * u.GHz
)
sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s)
assert_quantity_allclose(sq12, 499.650008 * u.km / u.s)
assert sq12.doppler_convention == "radio"
    # Velocity (doppler_convention='optical')
sq13 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="optical", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s)
assert_quantity_allclose(sq14, 500.350493 * u.km / u.s)
assert sq14.doppler_convention == "optical"
# Velocity - check relativistic velocity addition
sq13 = SpectralQuantity(
0 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c)
assert_quantity_allclose(sq14, 0.999 * c)
sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c)
assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c)
assert sq14.doppler_convention == "relativistic"
# Cases that should raise errors
sq15 = SpectralQuantity(200 * u.km / u.s)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s)
sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s)
sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention="optical")
with pytest.raises(ValueError, match="doppler_rest not set"):
_apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s)
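# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the sqrt(1/3) factors above follow from the relativistic Doppler
# formula f_obs = f_rest * sqrt((1 - beta) / (1 + beta)) with beta = 0.5
# (wavelengths scale by the reciprocal, hence sqrt(3)), and the final check is
# the relativistic velocity addition law w = (u + v) / (1 + u * v / c**2).
def test_relativistic_doppler_arithmetic_sketch():
    beta = 0.5
    assert_allclose(np.sqrt((1 - beta) / (1 + beta)), np.sqrt(1.0 / 3.0))
    # adding 0.999 c to 0.999 c relativistically still stays below c
    w = (0.999 + 0.999) / (1 + 0.999**2)
    assert 0.999 < w < 1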
# BASIC TESTS
def test_init_quantity():
sc = SpectralCoord(10 * u.GHz)
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention is None
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_spectral_quantity():
sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention="optical"))
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention == "optical"
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_too_many_args():
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz,
observer=LSRD,
target=SkyCoord(10, 20, unit="deg"),
radial_velocity=1 * u.km / u.s,
)
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit="deg"), redshift=1
)
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1)
def test_init_wrong_type():
with pytest.raises(
TypeError, match="observer must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, observer=3.4)
with pytest.raises(
TypeError, match="target must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, target=3.4)
with pytest.raises(
u.UnitsError,
match=(
"Argument 'radial_velocity' to function "
"'__new__' must be in units convertible to 'km / s'"
),
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg)
with pytest.raises(
TypeError,
match=(
"Argument 'radial_velocity' to function '__new__' has no 'unit' attribute."
" You should pass in an astropy Quantity instead."
),
):
SpectralCoord(10 * u.GHz, radial_velocity="banana")
with pytest.raises(u.UnitsError, match="redshift should be dimensionless"):
SpectralCoord(10 * u.GHz, redshift=1 * u.m)
with pytest.raises(
TypeError,
match='Cannot parse "banana" as a Quantity. It does not start with a number.',
):
SpectralCoord(10 * u.GHz, redshift="banana")
def test_observer_init_rv_behavior():
"""
    Test basic initialization behavior of observer/target and redshift/rv
"""
# Start off by specifying the radial velocity only
sc_init = SpectralCoord([4000, 5000] * u.AA, radial_velocity=100 * u.km / u.s)
assert sc_init.observer is None
assert sc_init.target is None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Next, set the observer, and check that the radial velocity hasn't changed
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init.observer = ICRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
assert sc_init.observer is not None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Setting the target should now cause the original radial velocity to be
# dropped in favor of the automatically computed one
sc_init.target = SkyCoord(
CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]),
frame="icrs",
radial_velocity=30 * u.km / u.s,
)
assert sc_init.target is not None
assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s)
# The observer can only be set if originally None - now that it isn't
# setting it again should fail
with pytest.raises(ValueError, match="observer has already been set"):
sc_init.observer = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
# And similarly, changing the target should not be possible
with pytest.raises(ValueError, match="target has already been set"):
sc_init.target = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
def test_rv_redshift_initialization():
# Check that setting the redshift sets the radial velocity appropriately,
# and that the redshift can be recovered
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=1)
assert isinstance(sc_init.redshift, u.Quantity)
assert_quantity_allclose(sc_init.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c)
# Check that setting the same radial velocity produces the same redshift
# and that the radial velocity can be recovered
sc_init2 = SpectralCoord([4000, 5000] * u.AA, radial_velocity=0.6 * c)
assert_quantity_allclose(sc_init2.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c)
# Check that specifying redshift as a quantity works
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1 * u.one)
assert sc_init.redshift == sc_init3.redshift
# Make sure that both redshift and radial velocity can't be specified at
# the same time.
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord([4000, 5000] * u.AA, radial_velocity=10 * u.km / u.s, redshift=2)
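# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the 0.6 c value above (and the sqrt(3) - 1 used in test_replicate
# below) follow from the relativistic relation
# 1 + z = sqrt((1 + beta) / (1 - beta)), i.e. beta = ((1+z)**2 - 1) / ((1+z)**2 + 1).
def test_redshift_velocity_relation_sketch():
    z = 1.0
    beta = ((1 + z) ** 2 - 1) / ((1 + z) ** 2 + 1)
    assert_allclose(beta, 0.6)
    # inverse direction: beta = 0.5 gives z = sqrt(3) - 1
    assert_allclose(np.sqrt((1 + 0.5) / (1 - 0.5)) - 1, np.sqrt(3) - 1)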
def test_replicate():
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_set_rv = sc_init.replicate(redshift=1)
assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
sc_set_rv = sc_init.replicate(radial_velocity=c / 2)
assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init2 = SpectralCoord([4000, 5000] * u.AA, redshift=1, observer=gcrs_origin)
with np.errstate(all="ignore"):
sc_init2.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1, target=gcrs_origin)
with np.errstate(all="ignore"):
sc_init3.replicate(redshift=0.5)
    assert_quantity_allclose(sc_init3, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init4 = SpectralCoord(
[4000, 5000] * u.AA, observer=gcrs_origin, target=gcrs_origin
)
with pytest.raises(
ValueError,
match=(
"Cannot specify radial velocity or redshift if both target and observer are"
" specified"
),
):
sc_init4.replicate(redshift=0.5)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_copy = sc_init.replicate(copy=True)
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_ref = sc_init.replicate()
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA)
def test_with_observer_stationary_relative_to():
# Simple tests of with_observer_stationary_relative_to to cover different
# ways of calling it
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc1 = SpectralCoord([4000, 5000] * u.AA)
with pytest.raises(
ValueError,
match=(
"This method can only be used if both observer and target are defined on"
" the SpectralCoord"
),
):
sc1.with_observer_stationary_relative_to("icrs")
sc2 = SpectralCoord(
[4000, 5000] * u.AA,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-1 * u.km / u.s,
0 * u.km / u.s,
-1 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s
),
)
# Motion of observer is in opposite direction to target
assert_quantity_allclose(sc2.radial_velocity, (2 + 2**0.5) * u.km / u.s)
# Change to observer that is stationary in ICRS
sc3 = sc2.with_observer_stationary_relative_to("icrs")
# Velocity difference is now pure radial velocity of target
assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s)
# Check setting the velocity in with_observer_stationary_relative_to
sc4 = sc2.with_observer_stationary_relative_to(
"icrs", velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
# Observer once again moving away from target but faster
assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s)
# Check that we can also pass frame classes instead of names
sc5 = sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s)
# And make sure we can also pass instances of classes without data
sc6 = sc2.with_observer_stationary_relative_to(
ICRS(), velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s)
# And with data provided no velocities are present
sc7 = sc2.with_observer_stationary_relative_to(
ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian"),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s)
# And also have the ability to pass frames with velocities already defined
sc8 = sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
assert_quantity_allclose(
sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Make sure that things work properly if passing a SkyCoord
sc9 = sc2.with_observer_stationary_relative_to(
SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian")),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s)
sc10 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
)
assert_quantity_allclose(
sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# But we shouldn't be able to pass both a frame with velocities, and explicit velocities
with pytest.raises(
ValueError,
match="frame already has differentials, cannot also specify velocity",
):
sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
# And velocities should have three elements
with pytest.raises(
ValueError, match="velocity should be a Quantity vector with 3 elements"
):
sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5), -3] * u.km / u.s
)
# Make sure things don't change depending on what frame class is used for reference
sc11 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
).transform_to(Galactic)
)
assert_quantity_allclose(
sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Check that it is possible to preserve the observer frame
sc12 = sc2.with_observer_stationary_relative_to(LSRD)
sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True)
assert isinstance(sc12.observer, Galactic)
assert isinstance(sc13.observer, ICRS)
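# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the 2 + sqrt(2) km/s asserted above is simple geometry. The target
# sits at (ra, dec) = (0, 45) deg, i.e. along the unit vector
# (sqrt(2)/2, 0, sqrt(2)/2), while the observer moves with velocity
# (-1, 0, -1) km/s, i.e. at -sqrt(2) km/s along the line of sight, so the
# relative radial velocity is 2 - (-sqrt(2)).
def test_observer_target_projection_sketch():
    los = np.array([2**0.5 / 2, 0.0, 2**0.5 / 2])  # unit vector towards target
    v_obs = np.array([-1.0, 0.0, -1.0])  # observer velocity in km/s
    v_target_los = 2.0  # target radial velocity in km/s
    assert_allclose(v_target_los - v_obs @ los, 2 + 2**0.5)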
def test_los_shift_radial_velocity():
# Tests to make sure that with_radial_velocity_shift correctly calculates
# the new radial velocity
# First check case where observer and/or target aren't specified
sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s)
sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s)
sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc4 = SpectralCoord(
500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin
)
sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s)
sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc7 = SpectralCoord(
500 * u.nm,
radial_velocity=1 * u.km / u.s,
target=ICRS(10 * u.deg, 20 * u.deg),
)
sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s)
sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s)
# Check that things still work when both observer and target are specified
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc10 = SpectralCoord(
500 * u.nm,
observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m),
target=ICRS(
10 * u.deg,
20 * u.deg,
radial_velocity=1 * u.km / u.s,
distance=10 * u.kpc,
),
)
sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s)
sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s)
# Check that things work if radial_velocity wasn't specified at all
sc13 = SpectralCoord(500 * u.nm)
sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s)
sc15 = sc1.with_radial_velocity_shift()
assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s)
# Check that units are verified
with pytest.raises(
u.UnitsError,
match=(
"Argument must have unit physical type 'speed' for radial velocty or "
"'dimensionless' for redshift."
),
):
sc1.with_radial_velocity_shift(target_shift=1 * u.kg)
@pytest.mark.xfail
def test_relativistic_radial_velocity():
# Test for when both observer and target have relativistic velocities.
# This is not yet supported, so the test is xfailed for now.
sc = SpectralCoord(
500 * u.nm,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-0.5 * c,
-0.5 * c,
-0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
1 * u.kpc,
1 * u.kpc,
1 * u.kpc,
0.5 * c,
0.5 * c,
0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
)
assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s)
# SCIENCE USE CASE TESTS
def test_spectral_coord_jupiter():
"""
Checks radial velocity between Earth and Jupiter
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
pos, vel = get_body_barycentric_posvel("jupiter", obstime)
jupiter = SkyCoord(
pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter)
# The velocity should be less than ~43 + a bit extra, which is the
# maximum possible earth-jupiter relative velocity. We check the exact
# value here (determined from SpectralCoord, so this serves as a test to
# check that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s)
def test_spectral_coord_alphacen():
"""
Checks radial velocity between Earth and Alpha Centauri
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# acen = SkyCoord.from_name('alpha cen')
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
radial_velocity=-18.0 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen)
# The velocity should be less than ~18 + 30 + a bit extra, which is the
# maximum possible relative velocity. We check the exact value here
# (determined from SpectralCoord, so this serves as a test to check that
# this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s)
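# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the "30" in the bound quoted above is roughly the Earth's orbital
# speed, 2 * pi * AU / yr, about 29.8 km/s.
def test_earth_orbital_speed_sketch():
    v_earth = (2 * np.pi * u.AU / u.yr).to(u.km / u.s)
    assert 29 * u.km / u.s < v_earth < 31 * u.km / u.s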
def test_spectral_coord_m31():
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(
ra=10.6847 * u.deg,
dec=41.269 * u.deg,
distance=710 * u.kpc,
radial_velocity=-300 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
assert_allclose(spc.redshift, -0.0009327276702120191)
def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563] * u.AA
observed_spc = SpectralCoord(rest_line_wls * (z + 1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
# rest_spc = observed_spc.with_observer(observed_spec.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target)
def test_shift_to_rest_star_withobserver():
rv = -8.3283011 * u.km / u.s
rest_line_wls = [5007, 6563] * u.AA
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
)
# Note that above the rv is missing from the SkyCoord.
# That's intended, as it will instead be set in the `SpectralCoord`. But
    # the SpectralCoord machinery should yield something comparable to
    # test_spectral_coord_alphacen
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
observed_spc = SpectralCoord(
rest_line_wls * (rv / c + 1), observer=obs, target=acen
)
rest_spc = observed_spc.to_rest()
assert_quantity_allclose(rest_spc, rest_line_wls)
barycentric_spc = observed_spc.with_observer_stationary_relative_to("icrs")
baryrest_spc = barycentric_spc.to_rest()
assert quantity_allclose(baryrest_spc, rest_line_wls)
# now make sure the change the barycentric shift did is comparable to the
# offset rv_correction produces
# barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons
barytarg = SkyCoord(
barycentric_spc.target.data.without_differentials(),
frame=barycentric_spc.target.realize_frame(None),
)
vcorr = barytarg.radial_velocity_correction(
kind="barycentric", obstime=obstime, location=GREENWICH
)
drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity
# note this probably will not work on the first try, but it's ok if this is
# "good enough", where good enough is estimated below. But that could be
# adjusted if we think that's too aggressive of a precision target for what
# the machinery can handle
# with pytest.raises(AssertionError):
assert_quantity_allclose(vcorr, drv, atol=10 * u.m / u.s)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
gcrs_not_origin = GCRS(CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]))
@pytest.mark.parametrize(
"sc_kwargs",
[
dict(radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(target=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, target=gcrs_not_origin),
],
)
def test_los_shift(sc_kwargs):
wl = [4000, 5000] * u.AA
with nullcontext() if "observer" not in sc_kwargs and "target" not in sc_kwargs else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
sc_init = SpectralCoord(wl, **sc_kwargs)
# these should always work in *all* cases because it's unambiguous that
# a target shift should behave this way
new_sc1 = sc_init.with_radial_velocity_shift(0.1)
assert_quantity_allclose(new_sc1, wl * 1.1)
# interpret at redshift
new_sc2 = sc_init.with_radial_velocity_shift(0.1 * u.dimensionless_unscaled)
assert_quantity_allclose(new_sc1, new_sc2)
new_sc3 = sc_init.with_radial_velocity_shift(-100 * u.km / u.s)
assert_quantity_allclose(new_sc3, wl * (1 + (-100 * u.km / u.s / c)))
# now try the cases where observer is specified as well/instead
if sc_init.observer is None or sc_init.target is None:
with pytest.raises(ValueError):
# both must be specified if you're going to mess with observer
sc_init.with_radial_velocity_shift(observer_shift=0.1)
if sc_init.observer is not None and sc_init.target is not None:
        # redshifting the observer should *blueshift* the LOS velocity since
        # it's the observer-to-target vector that matters
new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=0.1)
assert_quantity_allclose(new_sc4, wl / 1.1)
# an equal shift in both should produce no offset at all
new_sc5 = sc_init.with_radial_velocity_shift(
target_shift=0.1, observer_shift=0.1
)
assert_quantity_allclose(new_sc5, wl)
def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time("2018-12-13 9:00")
dt = 12 * u.hour
time2 = time1 + dt
# make the silly but simplifying assumption that the asteroid is moving along
# the x-axis of GCRS, and makes a 10 earth-radius closest approach
v_ast = [5, 0, 0] * u.km / u.s
x1 = -v_ast[0] * dt / 2
x2 = v_ast[0] * dt / 2
z = 10 * u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(
CartesianRepresentation(x1.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time1,
)
asteroid_loc2 = GCRS(
CartesianRepresentation(x2.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time2,
)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(
CartesianRepresentation([0 * u.km, 35000 * u.km, 0 * u.km]), obstime=time1
)
observer2 = GCRS(
CartesianRepresentation([0 * u.km, -35000 * u.km, 0 * u.km]), obstime=time2
)
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0 * u.km / u.s
assert spec_coord1.radial_velocity > -5 * u.km / u.s
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0 * u.km / u.s
assert spec_coord2.radial_velocity < 5 * u.km / u.s
# now check the behavior of with_observer_stationary_relative_to: we shift each coord
# into the velocity frame of its *own* target. That would then be a
# spectralcoord that would allow direct physical comparison of the two
# different spec_corrds. There's no way to test that, without
# actual data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(
target_sc2.radial_velocity, 0 * u.km / u.s, atol=1e-7 * u.km / u.s
)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1 / (1 + spec_coord1.redshift))
# TODO: Figure out what is meant by the below use case
# ensure the "target-rest" use gives the same answer
# target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest')
# assert_quantity_allclose(target_sc1, target_sc1_alt)
def test_spectral_coord_from_sky_coord_without_distance():
# see https://github.com/astropy/specutils/issues/658 for issue context
obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type="cartesian")
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)
    # coord.target = SkyCoord.from_name('m31')  # <- original issue; the line below is equivalent but needs no remote data access
with pytest.warns(
AstropyUserWarning, match="Distance on coordinate object is dimensionless"
):
coord.target = SkyCoord(ra=10.68470833 * u.deg, dec=41.26875 * u.deg)
EXPECTED_VELOCITY_FRAMES = {
"geocent": "gcrs",
"heliocent": "hcrs",
"lsrk": "lsrk",
"lsrd": "lsrd",
"galactoc": FITSWCS_VELOCITY_FRAMES["GALACTOC"],
"localgrp": FITSWCS_VELOCITY_FRAMES["LOCALGRP"],
}
@pytest.mark.parametrize("specsys", list(EXPECTED_VELOCITY_FRAMES))
@pytest.mark.slow
def test_spectralcoord_accuracy(specsys):
# This is a test to check the numerical results of transformations between
# different velocity frames in SpectralCoord. This compares the velocity
# shifts determined with SpectralCoord to those determined from the rv
# package in Starlink.
velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys]
reference_filename = get_pkg_data_filename("accuracy/data/rv.ecsv")
reference_table = Table.read(reference_filename, format="ascii.ecsv")
rest = 550 * u.nm
with iers.conf.set_temp("auto_download", False):
for row in reference_table:
observer = EarthLocation.from_geodetic(
-row["obslon"], row["obslat"]
).get_itrs(obstime=row["obstime"])
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_topo = SpectralCoord(
545 * u.nm, observer=observer, target=row["target"]
)
# FIXME: A warning is emitted for dates after MJD=57754.0 even
# though the leap second table should be valid until the end of
# 2020.
with nullcontext() if row["obstime"].mjd < 57754 else pytest.warns(
AstropyWarning, match="Tried to get polar motions"
):
sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame)
delta_vel = sc_topo.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
) - sc_final.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
)
if specsys == "galactoc":
assert_allclose(
delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30
)
else:
assert_allclose(
delta_vel.to_value(u.km / u.s),
row[specsys.lower()],
atol=0.02,
rtol=0.002,
)
# TODO: add test when target is not ICRS
# TODO: add test when SpectralCoord is in velocity to start with
|
3df8db134888a643dca37928bc289a1da7b02913cf5ba87b1299224b15323480 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates.matrix_utilities import (
angle_axis,
is_O3,
is_rotation,
matrix_product,
rotation_matrix,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_rotation_matrix():
assert_array_equal(rotation_matrix(0 * u.deg, "x"), np.eye(3))
assert_allclose(
rotation_matrix(90 * u.deg, "y"), [[0, 0, -1], [0, 1, 0], [1, 0, 0]], atol=1e-12
)
assert_allclose(
rotation_matrix(-90 * u.deg, "z"),
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
atol=1e-12,
)
assert_allclose(
rotation_matrix(45 * u.deg, "x"), rotation_matrix(45 * u.deg, [1, 0, 0])
)
assert_allclose(
rotation_matrix(125 * u.deg, "y"), rotation_matrix(125 * u.deg, [0, 1, 0])
)
assert_allclose(
rotation_matrix(-30 * u.deg, "z"), rotation_matrix(-30 * u.deg, [0, 0, 1])
)
assert_allclose(
np.dot(rotation_matrix(180 * u.deg, [1, 1, 0]), [1, 0, 0]),
[0, 1, 0],
atol=1e-12,
)
# make sure it also works for very small angles
assert_allclose(
rotation_matrix(0.000001 * u.deg, "x"),
rotation_matrix(0.000001 * u.deg, [1, 0, 0]),
)
def test_angle_axis():
m1 = rotation_matrix(35 * u.deg, "x")
an1, ax1 = angle_axis(m1)
assert an1 - 35 * u.deg < 1e-10 * u.deg
assert_allclose(ax1, [1, 0, 0])
m2 = rotation_matrix(-89 * u.deg, [1, 1, 0])
an2, ax2 = angle_axis(m2)
assert an2 - 89 * u.deg < 1e-10 * u.deg
assert_allclose(ax2, [-(2**-0.5), -(2**-0.5), 0])
def test_is_O3():
"""Test the matrix checker ``is_O3``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_O3(m1)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_O3(n1)) == (True, True) # (show the broadcasting)
# Test atol parameter
nn1 = np.tile(0.5 * m1, (2, 1, 1))
assert tuple(is_O3(nn1)) == (False, False) # (show the broadcasting)
assert tuple(is_O3(nn1, atol=1)) == (True, True) # (show the broadcasting)
# reflection
m2 = m1.copy()
m2[0, 0] *= -1
assert is_O3(m2)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_O3(n2)) == (True, True) # (show the broadcasting)
# Not any sort of O(3)
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_O3(m3)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_O3(n3)) == (True, False) # (show the broadcasting)
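# Added sketch (hypothetical helper test, not part of the original astropy
# suite), assuming ``is_O3`` tests orthogonality: a matrix is in O(3) when
# M @ M.T equals the identity (within atol), which is why the scaled matrix
# 0.5 * m1 above fails unless atol is loosened.
def test_orthogonality_condition_sketch():
    m1 = rotation_matrix(35 * u.deg, "x")
    assert_allclose(m1 @ m1.T, np.eye(3), atol=1e-12)
    assert_allclose((0.5 * m1) @ (0.5 * m1).T, 0.25 * np.eye(3), atol=1e-12)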
def test_is_rotation():
"""Test the rotation matrix checker ``is_rotation``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_rotation(m1)
assert is_rotation(m1, allow_improper=True) # (a less restrictive test)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_rotation(n1)) == (True, True) # (show the broadcasting)
# Test atol parameter
nn1 = np.tile(0.5 * m1, (2, 1, 1))
assert tuple(is_rotation(nn1)) == (False, False) # (show the broadcasting)
assert tuple(is_rotation(nn1, atol=10)) == (True, True) # (show the broadcasting)
# Improper rotation (unit rotation + reflection)
m2 = np.identity(3)
m2[0, 0] = -1
assert not is_rotation(m2)
assert is_rotation(m2, allow_improper=True)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_rotation(n2)) == (True, False) # (show the broadcasting)
# Not any sort of rotation
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_rotation(m3)
assert not is_rotation(m3, allow_improper=True)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_rotation(n3)) == (True, False) # (show the broadcasting)
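# Added sketch (hypothetical helper test, not part of the original astropy
# suite): a proper rotation is an orthogonal matrix with determinant +1; the
# reflection built above has determinant -1, which is why it only passes
# ``is_rotation`` with allow_improper=True.
def test_determinant_condition_sketch():
    m1 = rotation_matrix(35 * u.deg, "x")
    assert_allclose(np.linalg.det(m1), 1.0)
    m2 = np.identity(3)
    m2[0, 0] = -1
    assert_allclose(np.linalg.det(m2), -1.0)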
def test_matrix_product_deprecation():
with pytest.warns(AstropyDeprecationWarning, match=r"Use @ instead\.$"):
matrix_product(np.eye(2))
|
be427c59af19979977425d78c5945958b87ca8d3eaa23e45ab5cb04585aed6ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import copy
import io
from contextlib import nullcontext
import numpy as np
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
CIRS,
FK4,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
BaseCoordinateFrame,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
EarthLocation,
FK4NoETerms,
FunctionTransform,
GeocentricMeanEcliptic,
Latitude,
Longitude,
QuantityAttribute,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_body,
get_sun,
)
from astropy.coordinates.sites import get_builtin_sites
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
# Note: for regression test, we need to be sure that we use UTC for the
# epoch, even though more properly that should be TT; but the "expected"
# values were calculated using that.
j2000 = Time("J2000", scale="utc")
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491] * u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226] * u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296] * u.au)
coo = GeocentricMeanEcliptic(
lat=latitudes, lon=longitudes, distance=distances, obstime=times, equinox=times
)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428] * u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522] * u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649] * u.au)
expected_result = GCRS(
ra=ras, dec=decs, distance=distances, obstime=j2000
).cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg, 0)
time = Time("2010-1-1")
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10 * u.deg, 3 * u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10 * u.deg, 3 * u.deg, 1 * u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479 * u.deg, 38.78368896 * u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794 * u.deg, 45.99799147 * u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533 * u.deg, -16.71611586 * u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time("2012-01-01 00:00:00")
location = EarthLocation("10d", "45d", 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time("2012-01-01 00:00:00")
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548 * u.deg, 27.95968007 * u.deg)
alb_wdist = SkyCoord(alb, distance=133 * u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975 * u.deg, 45.28033881 * u.deg)
de_wdist = SkyCoord(de, distance=802 * u.pc)
aa = AltAz(
location=EarthLocation(lat=45 * u.deg, lon=0 * u.deg), obstime="2010-1-1"
)
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862 * u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862 * u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1 * u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(
de_wdistaa.separation(alb_wdistaa), 22.2862 * u.deg, rtol=1e-3
)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862 * u.deg, rtol=1e-3)
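# Added sketch (hypothetical helper test, not part of the original astropy
# suite): the > 1 marcsec assertion above is comfortably satisfied because the
# annual parallax of a source at 133 pc is about 1 AU / 133 pc ~ 7.5 mas.
def test_parallax_scale_sketch():
    parallax = (1 * u.AU / (133 * u.pc)).to(
        u.mas, equivalencies=u.dimensionless_angles()
    )
    assert parallax > 1 * u.mas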
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from astropy.coordinates import search_around_3d, search_around_sky
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit="deg")
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord(
[10.076, 10.00455] * u.deg,
[18.54746, 18.54896] * u.deg,
distance=[0.1, 1.5] * u.kpc,
)
search_around_3d(cat3d[0:1], cat3d, 1 * u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0 * u.deg, 0 * u.deg, distance=1 * u.AU)
ecl = crd.geocentricmeanecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
# new name is are all valid
ecl.lon
    # and for good measure, check that the other ecliptic frames all use the
    # same names for their attributes
from astropy.coordinates.builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1 * u.deg, 2 * u.deg, 3 * u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
from astropy.utils.exceptions import AstropyWarning
if hasattr(utils, "__warningregistry__"):
utils.__warningregistry__.clear()
# check that out-of-range warning appears among any other warnings. If
# tests are run with --remote-data then the IERS table will be an instance
# of IERS_Auto which is assured of being "fresh". In this case getting
# times outside the range of the table does not raise an exception. Only
# if using IERS_B (which happens without --remote-data, i.e. for all CI
# testing) do we expect another warning.
if isinstance(iers.earth_orientation_table.get(), iers.IERS_B):
ctx = pytest.warns(
AstropyWarning,
match=r"\(some\) times are outside of range covered by IERS table.*",
)
else:
ctx = nullcontext()
with ctx:
future_time = Time("2511-5-1")
c = CIRS(1 * u.deg, 2 * u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000) * u.hour
times = Time("2012-7-13 00:00:00") + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time("2012-7-13 00:00:00") + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
    # e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
Dc = -0.065838 * u.arcsec
Dd = +0.335299 * u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553 * u.arcsec
fk4noe_dec = (
fk4.dec
- (Dd * np.cos(fk4.ra) - Dc * np.sin(fk4.ra)) * np.sin(fk4.dec)
- Dctano * np.cos(fk4.dec)
)
fk4noe_ra = fk4.ra - (Dc * np.cos(fk4.ra) + Dd * np.sin(fk4.ra)) / np.cos(fk4.dec)
fk4noe = fk4.transform_to(FK4NoETerms())
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.0 * u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.0 * u.uas, rtol=0)
def test_regression_4926():
times = Time("2010-01-1") + np.arange(20) * u.day
green = get_builtin_sites()["greenwich"]
# this is the regression test
moon = get_body("moon", times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time("2015-01-01")
moon = get_body("moon", time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10.0, N) * u.km
time = Time("2010-1-1")
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [
EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000) * u.km
]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
def test_itrs_vals_5133():
"""
Test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is worse for small height above the Earth, which is why this test
uses large distances.
"""
time = Time("2010-1-1")
height = 500000.0 * u.km
el = EarthLocation.from_geodetic(lon=20 * u.deg, lat=45 * u.deg, height=height)
lons = [20, 30, 20] * u.deg
lats = [44, 45, 45] * u.deg
alts = u.Quantity([height, height, 10 * height])
coos = [
EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)
]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all(coo.isscalar for coo in aacs)
# the ~1 degree tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180 * u.deg, atol=1 * u.deg)
assert aacs[0].alt < 0 * u.deg
assert aacs[0].distance > 5000 * u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90 * u.deg, atol=5 * u.deg)
assert aacs[1].alt < 0 * u.deg
assert aacs[1].distance > 5000 * u.km
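    # the third case is directly overhead: same lon/lat, with the object at 10*height and the
    # observer at height along the same surface normal, so alt ≈ 90° and the range is ~9*height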
assert_quantity_allclose(aacs[2].alt, 90 * u.deg, atol=1 * u.arcminute)
assert_quantity_allclose(aacs[2].distance, 9 * height)
def test_regression_simple_5133():
"""
Simple test to check if alt-az calculations respect height of observer
Because ITRS is geocentric and includes aberration, an object that
appears 'straight up' to a geocentric observer (ITRS) won't be
straight up to a topocentric observer - see
https://github.com/astropy/astropy/issues/10983
This is why we construct a topocentric GCRS SkyCoord before calculating AltAz
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=[10.0, 0.0] * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=5.0 * u.km)
obsloc_gcrs, obsvel_gcrs = home.get_gcrs_posvel(t)
gcrs_geo = obj.get_itrs(t).transform_to(GCRS(obstime=t))
obsrepr = home.get_itrs(t).transform_to(GCRS(obstime=t)).cartesian
topo_gcrs_repr = gcrs_geo.cartesian - obsrepr
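    # vector from the observer to the object, still expressed along GCRS axes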
topocentric_gcrs_frame = GCRS(
obstime=t, obsgeoloc=obsloc_gcrs, obsgeovel=obsvel_gcrs
)
gcrs_topo = topocentric_gcrs_frame.realize_frame(topo_gcrs_repr)
aa = gcrs_topo.transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90] * u.deg, rtol=1e-7)
assert_quantity_allclose(aa.distance, 5 * u.km)
def test_regression_5743():
sc = SkyCoord(
[5, 10], [20, 30], unit=u.deg, obstime=["2017-01-01T00:00", "2017-01-01T00:10"]
)
assert sc[0].obstime.shape == tuple()
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067], unit=u.m)
)
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3) * u.hour
moon = get_body("moon", times, location=greenwich)
targets = SkyCoord([350.7 * u.deg, 260.7 * u.deg], [18.4 * u.deg, 22.4 * u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get("representation_type", None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation_type = self.default_representation
self._data = self.data.represent_as(self.representation_type)
rep1 = UnitSphericalRepresentation([0.0, 1] * u.deg, [2.0, 3.0] * u.deg)
rep2 = SphericalRepresentation(
[10.0, 11] * u.deg, [12.0, 13.0] * u.deg, [14.0, 15.0] * u.kpc
)
mf1 = MyFrame(rep1, my_attr=1.0 * u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation_type is CartesianRepresentation
assert mf2.representation_type is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
# It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation_type is UnitSphericalRepresentation
assert mf4.representation_type is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.0 * u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation_type is CartesianRepresentation
assert msf2.representation_type is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.0 * u.km, representation_type="unitspherical")
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation_type is UnitSphericalRepresentation
assert msf4.representation_type is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([1.1, 2.1] * u.deg, [3.1, 4.1] * u.deg)
sc0 = sc1[:0]
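    # sc0 is a zero-length catalog; searching around it must return empty, but correctly typed, results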
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10 * u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1 * u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10 * u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason="No Scipy")
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5, 6] * u.kpc)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, [5.1, 6.1] * u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500 * u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50 * u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500 * u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(
CartesianRepresentation((859.07256, -4137.20368, 5295.56871), unit="km"),
representation_type="cartesian",
)
gcrs.transform_to(ITRS())
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit="deg")
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format="ascii.ecsv")
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit="deg")
c2 = SkyCoord(2, 4, unit="deg")
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format="ascii.ecsv")
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6597():
frame_name = "galactic"
c1 = SkyCoord(1, 3, unit="deg", frame=frame_name)
c2 = SkyCoord(2, 4, unit="deg", frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox="J1949")
c1 = SkyCoord(1, 3, unit="deg", frame=frame)
c2 = SkyCoord(2, 4, unit="deg", frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
def test_regression_6697():
"""
Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(
348.63632871, -212.31704928, -0.60154936, unit=u.m / u.s
)
location = EarthLocation(
5327448.9957829, -1718665.73869569, 3051566.90295403, unit=u.m
)
t = Time(2458036.161966612, format="jd")
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel - pint_vels).norm()
assert delta < 1 * u.cm / u.s
def test_regression_8138():
sc = SkyCoord(1 * u.deg, 2 * u.deg)
newframe = GCRS()
sc2 = sc.transform_to(newframe)
assert newframe.is_equivalent_frame(sc2.frame)
def test_regression_8276():
from astropy.coordinates import baseframe
class MyFrame(BaseCoordinateFrame):
a = QuantityAttribute(unit=u.m)
# we save the transform graph so that it doesn't accidentally mess with other tests
old_transform_graph = baseframe.frame_transform_graph
try:
baseframe.frame_transform_graph = copy.copy(baseframe.frame_transform_graph)
# as reported in 8276, this previously failed right here because
# registering the transform tries to create a frame attribute
@baseframe.frame_transform_graph.transform(FunctionTransform, MyFrame, AltAz)
def trans(my_frame_coord, altaz_frame):
pass
# should also be able to *create* the Frame at this point
MyFrame()
finally:
baseframe.frame_transform_graph = old_transform_graph
def test_regression_8615():
# note this is a "higher-level" symptom of the problem that a test now moved
# to pyerfa (erfa/tests/test_erfa:test_float32_input) is testing for, but we keep
# it here as well due to being a more practical version of the issue.
crf = CartesianRepresentation(np.array([3, 0, 4], dtype=float) * u.pc)
srf = SphericalRepresentation.from_cartesian(crf) # does not error in 8615
cr = CartesianRepresentation(np.array([3, 0, 4], dtype="f4") * u.pc)
sr = SphericalRepresentation.from_cartesian(cr) # errors in 8615
assert_quantity_allclose(sr.distance, 5 * u.pc)
assert_quantity_allclose(srf.distance, 5 * u.pc)
def test_regression_8924():
"""This checks that the ValueError in
BaseRepresentation._re_represent_differentials is raised properly
"""
# A case where the representation has a 's' differential, but we try to
# re-represent only with an 's2' differential
rep = CartesianRepresentation(1, 2, 3, unit=u.kpc)
dif = CartesianDifferential(4, 5, 6, u.km / u.s)
rep = rep.with_differentials(dif)
with pytest.raises(ValueError):
rep._re_represent_differentials(
CylindricalRepresentation, {"s2": CylindricalDifferential}
)
def test_regression_10092():
"""
Check that we still get a proper motion even for SkyCoords without distance
"""
c = SkyCoord(
l=10 * u.degree,
b=45 * u.degree,
pm_l_cosb=34 * u.mas / u.yr,
pm_b=-117 * u.mas / u.yr,
frame="galactic",
obstime=Time("1988-12-18 05:11:23.5"),
)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
newc = c.apply_space_motion(dt=10 * u.year)
assert_quantity_allclose(
newc.pm_l_cosb, 33.99980714 * u.mas / u.yr, atol=1.0e-5 * u.mas / u.yr
)
def test_regression_10226():
# Dictionary representation of SkyCoord should contain differentials.
sc = SkyCoord(
[270, 280] * u.deg,
[30, 35] * u.deg,
[10, 11] * u.pc,
radial_velocity=[20, -20] * u.km / u.s,
)
sc_as_dict = sc.info._represent_as_dict()
assert "radial_velocity" in sc_as_dict
# But only the components that have been specified.
assert "pm_dec" not in sc_as_dict
@pytest.mark.parametrize(
"mjd", (52000, [52000], [[52000]], [52001, 52002], [[52001], [52002]])
)
def test_regression_10422(mjd):
"""
Check that we can get a GCRS for a scalar EarthLocation and a
size=1 non-scalar Time.
"""
# Avoid trying to download new IERS data.
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
t = Time(mjd, format="mjd", scale="tai")
loc = EarthLocation(88258.0 * u.m, -4924882.2 * u.m, 3943729.0 * u.m)
p, v = loc.get_gcrs_posvel(obstime=t)
assert p.shape == v.shape == t.shape
@pytest.mark.remote_data
def test_regression_10291():
"""
According to https://eclipse.gsfc.nasa.gov/OH/transit12.html,
the minimum separation between Venus and the Sun during the 2012
transit is 554 arcseconds for an observer at the Geocenter.
If light deflection from the Sun is incorrectly applied, this increases
to 557 arcseconds.
"""
t = Time("2012-06-06 01:29:36")
sun = get_body("sun", t)
venus = get_body("venus", t)
assert_quantity_allclose(
venus.separation(sun), 554.427 * u.arcsecond, atol=0.001 * u.arcsecond
)
|
3bdd32c9a6bb1bb3dd28c68a8ee5267bda6ad682bcaf333087dbf4061e327d07 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
import os
import warnings
from importlib import metadata
import erfa
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CIRS,
GCRS,
HCRS,
ICRS,
ITRS,
TEME,
TETE,
AltAz,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
HADec,
HeliocentricMeanEcliptic,
PrecessedGeocentric,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_sun,
solar_system_ephemeris,
)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
get_location_gcrs,
tete_to_itrs_mat,
)
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates.solar_system import (
_apparent_position_in_true_coordinates,
get_body,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
CI = os.environ.get("CI", False) == "true"
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.0, 1, len(usph)) * u.pc
inod = ICRS(usph)
iwd = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS())
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time("J2005"))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
icrs_coords = [ICRS(usph), ICRS(usph.lon, usph.lat, distance=dist)]
gcrs_frames = [GCRS(), GCRS(obstime=Time("J2005"))]
@pytest.mark.parametrize("icoo", icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS())
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10 * u.deg)
    # now check that the gcrs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10 * u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10 * u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000.0, 0, 0] * u.km, obsgeovel=[1, 0, 0] * u.km / u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10 * u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS()) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize("gframe", gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10 * u.deg)
# and the distance should transform at least somehow
assert not allclose(
gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10 * u.pc
)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
altazframe = AltAz(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_cirs_to_hadec():
"""
Check the basic CIRS<->HADec transforms.
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
hadecframe = HADec(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(hadecframe).transform_to(cirs)
cirs3 = cirscart.transform_to(hadecframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_itrs_topo_to_altaz_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
altaz_frame1 = AltAz(obstime="J2000", location=loc)
altaz_frame2 = AltAz(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
altaz1 = icrs.transform_to(altaz_frame1)
# Refraction added
altaz2 = icrs.transform_to(altaz_frame2)
# Refraction removed
cirs = altaz2.transform_to(cirs_frame)
altaz3 = cirs.transform_to(altaz_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
altaz11 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz11.az - altaz1.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.alt - altaz1.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.distance - altaz1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = altaz11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
altaz22 = itrs.transform_to(altaz_frame2)
assert_allclose(altaz22.az - altaz2.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.alt - altaz2.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.distance - altaz2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = altaz22.transform_to(itrs_frame)
altaz33 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz33.az - altaz3.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.alt - altaz3.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.distance - altaz3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_itrs_topo_to_hadec_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
hadec_frame1 = HADec(obstime="J2000", location=loc)
hadec_frame2 = HADec(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
hadec1 = icrs.transform_to(hadec_frame1)
# Refraction added
hadec2 = icrs.transform_to(hadec_frame2)
# Refraction removed
cirs = hadec2.transform_to(cirs_frame)
hadec3 = cirs.transform_to(hadec_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
hadec11 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec11.ha - hadec1.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.dec - hadec1.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.distance - hadec1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = hadec11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
hadec22 = itrs.transform_to(hadec_frame2)
assert_allclose(hadec22.ha - hadec2.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.dec - hadec2.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.distance - hadec2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = hadec22.transform_to(itrs_frame)
hadec33 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec33.ha - hadec3.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.dec - hadec3.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.distance - hadec3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(ITRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation_type = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS()).transform_to(gcrsc)
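    # even though gcrsc carries a Cartesian representation_type, the round-tripped angles
    # should match its spherical components, i.e. the result is representation-independent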
assert_allclose(gcrsc.spherical.lon, gcrsc2.ra)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS geocentric transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000")
cirs6 = CIRS(usph, obstime="J2006")
cirs2 = cirs.transform_to(ITRS()).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS()).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_cirs_itrs_topo():
"""
Check basic CIRS<->ITRS topocentric transforms for round-tripping.
"""
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000", location=loc)
cirs6 = CIRS(usph, obstime="J2006", location=loc)
cirs2 = cirs.transform_to(ITRS(location=loc)).transform_to(cirs)
# different obstime
cirs6_2 = cirs6.transform_to(ITRS(location=loc)).transform_to(cirs)
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(CIRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = (
gcrs.transform_to(ITRS())
.transform_to(CIRS())
.transform_to(ITRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = (
gcrs.transform_to(ICRS())
.transform_to(CIRS())
.transform_to(ICRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000")[None] # broadcast with times below
    # check with array times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_gcrs_hadec():
"""
Check GCRS<->HADec transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000") # broadcast with times below
    # check with array times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
hdframe = HADec(obstime=times, location=loc)
hd1 = gcrs.transform_to(hdframe)
hd2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(hdframe)
hd3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(hdframe)
# make sure they're all consistent
assert_allclose(hd1.dec, hd2.dec)
assert_allclose(hd1.ha, hd2.ha)
assert_allclose(hd1.dec, hd3.dec)
assert_allclose(hd1.ha, hd3.ha)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time("J2000").jd
gcrs_coo = GCRS(180 * u.deg, 2 * u.deg, distance=10000 * u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric())
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10 * u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10 * u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox="B1850"))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5 * u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5 * u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
def test_precessed_geocentric_different_obstime():
# Create two PrecessedGeocentric frames with different obstime
precessedgeo1 = PrecessedGeocentric(obstime="2021-09-07")
precessedgeo2 = PrecessedGeocentric(obstime="2021-06-07")
# GCRS->PrecessedGeocentric should give different results for the two frames
gcrs_coord = GCRS(10 * u.deg, 20 * u.deg, 3 * u.AU, obstime=precessedgeo1.obstime)
pg_coord1 = gcrs_coord.transform_to(precessedgeo1)
pg_coord2 = gcrs_coord.transform_to(precessedgeo2)
assert not pg_coord1.is_equivalent_frame(pg_coord2)
assert not allclose(pg_coord1.cartesian.xyz, pg_coord2.cartesian.xyz)
# Looping back to GCRS should return the original coordinate
loopback1 = pg_coord1.transform_to(gcrs_coord)
loopback2 = pg_coord2.transform_to(gcrs_coord)
assert loopback1.is_equivalent_frame(gcrs_coord)
assert loopback2.is_equivalent_frame(gcrs_coord)
assert_allclose(loopback1.cartesian.xyz, gcrs_coord.cartesian.xyz)
assert_allclose(loopback2.cartesian.xyz, gcrs_coord.cartesian.xyz)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [
# J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(-90 * u.deg, 65 * u.deg), obstime=Time("J2000")),
AltAz(location=EarthLocation(120 * u.deg, -35 * u.deg), obstime=Time("J2000")),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-08-01 08:00:00"),
),
AltAz(
location=EarthLocation(120 * u.deg, -35 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
]
MOONDIST = 385000 * u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
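# a point along the (1, 1, 1) direction with each component MOONDIST/sqrt(3), so its norm is MOONDIST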
# roughly earth orbital eccentricity, but with an added tolerance
EARTHECC = 0.017 + 0.005
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == "gcrs"
# the .to(u.au) is not necessary, it just makes the asserts on failure more readable
assert (EARTHECC - 1) * u.au < sun.distance.to(u.au) < (EARTHECC + 1) * u.au
sunaa = sun.transform_to(testframe)
assert (EARTHECC - 1) * u.au < sunaa.distance.to(u.au) < (EARTHECC + 1) * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS()).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS()).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(
UnitSphericalRepresentation(10 * u.deg, 20 * u.deg), obstime=testframe.obstime
)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0 * u.deg, 0 * u.deg, distance=10 * u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < gcrs.distance.to(u.au) < (EARTHECC + 1) * u.au
cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < cirs.distance.to(u.au) < (EARTHECC + 1) * u.au
itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
assert (
(EARTHECC - 1) * u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1) * u.au
)
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(testframe.obstime, "tdb"))
earth_icrs_xyz = earth_pv_bary[0] * u.au
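    # place a moon-like object one lunar distance from the Earth along the ICRS z-axis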
moonoffset = [0, 0, MOONDIST.value] * MOONDIST.unit
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000 * u.km < np.abs(moonaa.distance - MOONDIST).to(u.km) < 7000 * u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216] * u.m
obsgeovel = [4.59798494, -407.84677071, 0.0] * u.m / u.s
moon_lapalma = SkyCoord(
GCRS(
318.7048445 * u.deg,
-11.98761996 * u.deg,
369722.8231031 * u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel,
)
)
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
assert_allclose(delta, 0.0 * u.m, atol=1 * u.m)
def test_teme_itrf():
"""
    Test case for the transform from TEME to ITRF.
    The test case derives from the example in Appendix C of Vallado, Crawford, Hujsak & Kelso (2006).
See https://celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf
"""
v_itrf = CartesianDifferential(
-3.225636520, -2.872451450, 5.531924446, unit=u.km / u.s
)
p_itrf = CartesianRepresentation(
-1033.479383,
7901.2952740,
6380.35659580,
unit=u.km,
differentials={"s": v_itrf},
)
t = Time("2004-04-06T07:51:28.386")
teme = ITRS(p_itrf, obstime=t).transform_to(TEME(obstime=t))
v_teme = CartesianDifferential(
-4.746131487, 0.785818041, 5.531931288, unit=u.km / u.s
)
p_teme = CartesianRepresentation(
5094.18016210,
6127.64465050,
6380.34453270,
unit=u.km,
differentials={"s": v_teme},
)
assert_allclose(
teme.cartesian.without_differentials().xyz,
p_teme.without_differentials().xyz,
atol=30 * u.cm,
)
assert_allclose(
teme.cartesian.differentials["s"].d_xyz,
p_teme.differentials["s"].d_xyz,
atol=1.0 * u.cm / u.s,
)
# test round trip
itrf = teme.transform_to(ITRS(obstime=t))
assert_allclose(
itrf.cartesian.without_differentials().xyz,
p_itrf.without_differentials().xyz,
atol=100 * u.cm,
)
assert_allclose(
itrf.cartesian.differentials["s"].d_xyz,
p_itrf.differentials["s"].d_xyz,
atol=1 * u.cm / u.s,
)
def test_precessedgeocentric_loopback():
from_coo = PrecessedGeocentric(
1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-01-01", equinox="2001-01-01"
)
# Change just the obstime
to_frame = PrecessedGeocentric(obstime="2001-06-30", equinox="2001-01-01")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert not allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
# Change just the equinox
to_frame = PrecessedGeocentric(obstime="2001-01-01", equinox="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the direction but not the distance
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
def test_teme_loopback():
from_coo = TEME(1 * u.AU, 2 * u.AU, 3 * u.AU, obstime="2001-01-01")
to_frame = TEME(obstime="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
@pytest.mark.remote_data
def test_earth_orientation_table(monkeypatch):
"""Check that we can set the IERS table used as Earth Reference.
Use the here and now to be sure we get a difference.
"""
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
t = Time.now()
location = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
altaz = AltAz(location=location, obstime=t)
sc = SkyCoord(1 * u.deg, 2 * u.deg)
# Default: uses IERS_Auto, which will give a prediction.
# Note: tests run with warnings turned into errors, so it is
# meaningful if this passes.
if CI:
with warnings.catch_warnings():
# Server occasionally blocks IERS download in CI.
warnings.filterwarnings("ignore", message=r".*using local IERS-B.*")
# This also captures unclosed socket warning that is ignored in setup.cfg
warnings.filterwarnings("ignore", message=r".*unclosed.*")
altaz_auto = sc.transform_to(altaz)
else:
altaz_auto = sc.transform_to(altaz) # No warnings
with iers.earth_orientation_table.set(iers.IERS_B.open()):
with pytest.warns(AstropyWarning, match="after IERS data"):
altaz_b = sc.transform_to(altaz)
sep_b_auto = altaz_b.separation(altaz_auto)
assert_allclose(sep_b_auto, 0.0 * u.deg, atol=1 * u.arcsec)
assert sep_b_auto > 10 * u.microarcsecond
# Check we returned to regular IERS system.
altaz_auto2 = sc.transform_to(altaz)
assert altaz_auto2.separation(altaz_auto) == 0.0
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricMeanEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set("jpl"):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0 * u.deg, atol=10 * u.mas)
assert all(
sep > 10 * u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)
)
# CIRS should be the same
assert_allclose(sep_cirs, 0.0 * u.deg, atol=1 * u.microarcsecond)
def test_tete_transforms():
"""
We test the TETE transforms for proper behaviour here.
The TETE transforms are tested for accuracy against JPL Horizons in
test_solar_system.py. Here we are looking to check for consistency and
errors in the self transform.
"""
loc = EarthLocation.from_geodetic("-22°57'35.1", "-67°47'14.1", 5186 * u.m)
time = Time("2020-04-06T00:00")
p, v = loc.get_gcrs_posvel(time)
gcrs_frame = GCRS(obstime=time, obsgeoloc=p, obsgeovel=v)
moon = SkyCoord(
169.24113968 * u.deg,
10.86086666 * u.deg,
358549.25381755 * u.km,
frame=gcrs_frame,
)
tete_frame = TETE(obstime=time, location=loc)
    # need to set obsgeoloc/vel explicitly, or SkyCoord behaviour over-writes them
tete_geo = TETE(obstime=time, location=EarthLocation(*([0, 0, 0] * u.km)))
# test self-transform by comparing to GCRS-TETE-ITRS-TETE route
tete_coo1 = moon.transform_to(tete_frame)
tete_coo2 = moon.transform_to(tete_geo)
assert_allclose(tete_coo1.separation_3d(tete_coo2), 0 * u.mm, atol=1 * u.mm)
# test TETE-ITRS transform by comparing GCRS-CIRS-ITRS to GCRS-TETE-ITRS
itrs1 = moon.transform_to(CIRS()).transform_to(ITRS())
itrs2 = moon.transform_to(TETE()).transform_to(ITRS())
assert_allclose(itrs1.separation_3d(itrs2), 0 * u.mm, atol=1 * u.mm)
# test round trip GCRS->TETE->GCRS
new_moon = moon.transform_to(TETE()).transform_to(moon)
assert_allclose(new_moon.separation_3d(moon), 0 * u.mm, atol=1 * u.mm)
# test round trip via ITRS
tete_rt = tete_coo1.transform_to(ITRS(obstime=time)).transform_to(tete_coo1)
assert_allclose(tete_rt.separation_3d(tete_coo1), 0 * u.mm, atol=1 * u.mm)
# ensure deprecated routine remains consistent
# make sure test raises warning!
with pytest.warns(AstropyDeprecationWarning, match="The use of"):
tete_alt = _apparent_position_in_true_coordinates(moon)
assert_allclose(tete_coo1.separation_3d(tete_alt), 0 * u.mm, atol=100 * u.mm)
def test_straight_overhead():
"""
    With a precise CIRS<->Observed transformation this should give Alt=90 exactly.
If the CIRS self-transform breaks it won't, due to improper treatment of aberration
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
# Note, this won't be overhead for a topocentric observer because of
# aberration.
cirs_geo = obj.get_itrs(t).transform_to(CIRS(obstime=t))
# now get the Geocentric CIRS position of observatory
obsrepr = home.get_itrs(t).transform_to(CIRS(obstime=t)).cartesian
# topocentric CIRS position of a straight overhead object
cirs_repr = cirs_geo.cartesian - obsrepr
# create a CIRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
topocentric_cirs_frame = CIRS(obstime=t, location=home)
cirs_topo = topocentric_cirs_frame.realize_frame(cirs_repr)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = cirs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = cirs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def test_itrs_straight_overhead():
"""
With a precise ITRS<->Observed transformation this should give Alt=90 exactly
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = obj.get_itrs(t, location=home).transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = obj.get_itrs(t, location=home).transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def jplephem_ge(minversion):
"""Check if jplephem is installed and has version >= minversion."""
# This is a separate routine since somehow with pyinstaller the stanza
# not HAS_JPLEPHEM or metadata.version('jplephem') < '2.15'
# leads to a module not found error.
try:
return HAS_JPLEPHEM and metadata.version("jplephem") >= minversion
except Exception:
return False
@pytest.mark.remote_data
@pytest.mark.skipif(not jplephem_ge("2.15"), reason="requires jplephem >= 2.15")
def test_aa_hd_high_precision():
"""These tests are provided by @mkbrewer - see issue #10356.
    The code that produces them agrees very well (<0.5 mas) with Skyfield once polar motion
    is turned off, but Skyfield does not include polar motion, so a comparison to Skyfield
    or JPL Horizons will be ~1" off.
The absence of polar motion within Skyfield and the disagreement between Skyfield and Horizons
make high precision comparisons to those codes difficult.
Updated 2020-11-29, after the comparison between codes became even better,
down to 100 nas.
Updated 2023-02-14, after IERS changes the IERS B format and analysis,
causing small deviations.
NOTE: the agreement reflects consistency in approach between two codes,
not necessarily absolute precision. If this test starts failing, the
tolerance can and should be weakened *if* it is clear that the change is
due to an improvement (e.g., a new IAU precession model).
"""
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
# Note: at this level of precision for the comparison, we have to include
# the location in the time, as it influences the transformation to TDB.
t = Time("2017-04-06T00:00:00.0", location=loc)
with solar_system_ephemeris.set("de430"):
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
moon_hd = moon.transform_to(HADec(obstime=t, location=loc))
# Numbers from
# https://github.com/astropy/astropy/pull/11073#issuecomment-735486271
# updated in https://github.com/astropy/astropy/issues/11683
# and again after the IERS_B change.
TARGET_AZ, TARGET_EL = 15.032673662647138 * u.deg, 50.303110087520054 * u.deg
TARGET_DISTANCE = 376252.88325051306 * u.km
assert_allclose(moon_aa.az, TARGET_AZ, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.alt, TARGET_EL, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.distance, TARGET_DISTANCE, atol=0.1 * u.mm, rtol=0)
ha, dec = erfa.ae2hd(
moon_aa.az.to_value(u.radian),
moon_aa.alt.to_value(u.radian),
lat.to_value(u.radian),
)
ha = u.Quantity(ha, u.radian, copy=False)
dec = u.Quantity(dec, u.radian, copy=False)
assert_allclose(moon_hd.ha, ha, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_hd.dec, dec, atol=0.1 * u.uas, rtol=0)
def test_aa_high_precision_nodata():
"""
These tests are designed to ensure high precision alt-az transforms.
They are a slight fudge since the target values come from astropy itself. They are generated
    with a version of the code that passes the tests above, but using the internal solar system
ephemerides to avoid the use of remote data.
"""
# Last updated when the new IERS B format and analysis was introduced.
TARGET_AZ, TARGET_EL = 15.0323151 * u.deg, 50.30271925 * u.deg
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
t = Time("2017-04-06T00:00:00.0")
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
assert_allclose(moon_aa.az - TARGET_AZ, 0 * u.mas, atol=0.5 * u.mas)
assert_allclose(moon_aa.alt - TARGET_EL, 0 * u.mas, atol=0.5 * u.mas)
class TestGetLocationGCRS:
# TETE and CIRS use get_location_gcrs to get obsgeoloc and obsgeovel
# with knowledge of some of the matrices. Check that this is consistent
# with a direct transformation.
def setup_class(cls):
cls.loc = loc = EarthLocation.from_geodetic(
np.linspace(0, 360, 6) * u.deg, np.linspace(-90, 90, 6) * u.deg, 100 * u.m
)
cls.obstime = obstime = Time(np.linspace(2000, 2010, 6), format="jyear")
# Get comparison via a full transformation. We do not use any methods
# of EarthLocation, since those depend on the fast transform.
loc_itrs = ITRS(loc.x, loc.y, loc.z, obstime=obstime)
zeros = np.broadcast_to(0.0 * (u.km / u.s), (3,) + loc_itrs.shape, subok=True)
loc_itrs.data.differentials["s"] = CartesianDifferential(zeros)
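        # ground stations are fixed in the rotating ITRS, so attach zero velocities; the GCRS
        # velocity then comes entirely from the ITRS->GCRS transformation (Earth rotation)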
loc_gcrs_cart = loc_itrs.transform_to(GCRS(obstime=obstime)).cartesian
cls.obsgeoloc = loc_gcrs_cart.without_differentials()
cls.obsgeovel = loc_gcrs_cart.differentials["s"].to_cartesian()
def check_obsgeo(self, obsgeoloc, obsgeovel):
assert_allclose(obsgeoloc.xyz, self.obsgeoloc.xyz, atol=0.1 * u.um, rtol=0.0)
assert_allclose(
obsgeovel.xyz, self.obsgeovel.xyz, atol=0.1 * u.mm / u.s, rtol=0.0
)
def test_get_gcrs_posvel(self):
# Really just a sanity check
self.check_obsgeo(*self.loc.get_gcrs_posvel(self.obstime))
def test_tete_quick(self):
# Following copied from intermediate_rotation_transforms.gcrs_to_tete
rbpn = erfa.pnm06a(*get_jd12(self.obstime, "tt"))
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, tete_to_itrs_mat(self.obstime, rbpn=rbpn), rbpn
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
def test_cirs_quick(self):
cirs_frame = CIRS(location=self.loc, obstime=self.obstime)
# Following copied from intermediate_rotation_transforms.gcrs_to_cirs
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, cirs_to_itrs_mat(cirs_frame.obstime), pmat
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
|
6047a58f3519dff80ee255b0bc09d035d570be29b0eb709ce5189c79fe3a6343 | import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, Latitude, Longitude
from astropy.coordinates.sites import (
SiteRegistry,
get_builtin_sites,
get_downloaded_sites,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy.utils.exceptions import AstropyUserWarning
@pytest.fixture
def earthlocation_without_site_registry(monkeypatch):
monkeypatch.setattr(EarthLocation, "_site_registry", None)
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg["greenwich"]
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = reg.names
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
@pytest.mark.remote_data(source="astropy")
def test_online_sites():
reg = get_downloaded_sites()
keck = reg["keck"]
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(
lon, -Longitude("155:28.7", unit=u.deg), atol=0.001 * u.deg
)
assert_quantity_allclose(lat, Latitude("19:49.7", unit=u.deg), atol=0.001 * u.deg)
assert_quantity_allclose(el, 4160 * u.m, atol=1 * u.m)
names = reg.names
assert "keck" in names
assert "ctio" in names
# The JSON file contains `name` and `aliases` for each site, and astropy
# should use names from both, but not empty strings [#12721].
assert "" not in names
assert "Royal Observatory Greenwich" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
with pytest.raises(
KeyError,
match="Site 'kec' not in database. Use the 'names' attribute to see available",
):
reg["kec"]
@pytest.mark.remote_data(source="astropy")
# this will *try* the online version, so we have to make it remote_data, even
# though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site("greenwich")
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = EarthLocation.get_site_names()
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use EarthLocation.get_site_names",
):
EarthLocation.of_site("nonexistent")
@pytest.mark.parametrize(
"class_method,args",
[(EarthLocation.get_site_names, []), (EarthLocation.of_site, ["greenwich"])],
)
def test_Earthlocation_refresh_cache_is_mandatory_kwarg(class_method, args):
with pytest.raises(
TypeError,
match=(
rf".*{class_method.__name__}\(\) takes [12] positional "
"arguments? but [23] were given$"
),
):
class_method(*args, False)
@pytest.mark.parametrize(
"class_method,args",
[(EarthLocation.get_site_names, []), (EarthLocation.of_site, ["greenwich"])],
)
@pytest.mark.parametrize("refresh_cache", [False, True])
def test_Earthlocation_refresh_cache(class_method, args, refresh_cache, monkeypatch):
def get_site_registry_monkeypatched(force_download, force_builtin=False):
assert force_download is refresh_cache
return get_builtin_sites()
monkeypatch.setattr(
EarthLocation, "_get_site_registry", get_site_registry_monkeypatched
)
class_method(*args, refresh_cache=refresh_cache)
@pytest.mark.parametrize(
"force_download,expectation",
[
(
False,
pytest.warns(
AstropyUserWarning, match=r"use the option 'refresh_cache=True'\.$"
),
),
(True, pytest.raises(OSError, match=r"^fail for test$")),
("url", pytest.raises(OSError, match=r"^fail for test$")),
],
)
@pytest.mark.parametrize(
"class_method,args",
[(EarthLocation.get_site_names, []), (EarthLocation.of_site, ["greenwich"])],
)
def test_EarthLocation_site_registry_connection_fail(
force_download,
expectation,
class_method,
args,
earthlocation_without_site_registry,
monkeypatch,
):
def fail_download(*args, **kwargs):
raise OSError("fail for test")
monkeypatch.setattr(get_downloaded_sites, "__code__", fail_download.__code__)
with expectation:
class_method(*args, refresh_cache=force_download)
@pytest.mark.parametrize(
"registry_kwarg",
["force_builtin", pytest.param("force_download", marks=pytest.mark.remote_data)],
)
def test_EarthLocation_state(earthlocation_without_site_registry, registry_kwarg):
EarthLocation._get_site_registry(**{registry_kwarg: True})
assert isinstance(EarthLocation._site_registry, SiteRegistry)
oldreg = EarthLocation._site_registry
assert oldreg is EarthLocation._get_site_registry()
assert oldreg is not EarthLocation._get_site_registry(**{registry_kwarg: True})
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
loc = EarthLocation.from_geodetic(lat=1 * u.deg, lon=2 * u.deg, height=3 * u.km)
reg.add_site(["sitea", "site A"], loc)
assert len(reg.names) == 2
assert reg["SIteA"] is loc
assert reg["sIte a"] is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing to use remote_data
    # note that this does *not* mess up the registry for EarthLocation because
    # the registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site("greenwich")
assert type(el2) is EarthLocation2
assert el2.info.name == "Royal Observatory Greenwich"
def check_builtin_matches_remote(download_url=True):
"""
This function checks that the builtin sites registry is consistent with the
remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because its
    name does not start with "test"); it is instead meant to be used as a
    check before merging changes in astropy-data.
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(
builtin_registry[name].geocentric, dl_registry[name].geocentric
)
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(" ", name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(
" ",
name,
"builtin:",
builtin_registry[name],
"download:",
dl_registry[name],
)
assert False, (
"Builtin and download registry aren't consistent - failures printed to"
" stdout"
)
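# Hedged usage note: the check above is meant to be run by hand (e.g. from an
# interactive session) before merging changes in astropy-data, along the lines
# of::
#
#     from astropy.coordinates.tests.test_sites import check_builtin_matches_remote
#     check_builtin_matches_remote()  # requires network access
#
# The import path above is an assumption based on this file's location.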
def test_meta_present():
assert (
get_builtin_sites()["greenwich"].info.meta["source"]
== "Ordnance Survey via http://gpsinformation.net/main/greenwich.htm and UNESCO"
)
|
dda642170dd3e9d47b823dbe5d11817eee8b7578bf6194d344f0282589a1c698 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for miscellaneous functionality in the `funcs` module
"""
import numpy as np
import pytest
from numpy import testing as npt
from astropy import units as u
from astropy.coordinates import FK5, ICRS, SkyCoord
from astropy.coordinates import representation as r
from astropy.coordinates.funcs import (
concatenate,
concatenate_representations,
get_constellation,
get_sun,
)
from astropy.time import Time
CARTESIAN_POS = r.CartesianRepresentation([1, 2, 3] * u.kpc)
CARTESIAN_VEL = r.CartesianDifferential([8, 9, 10] * u.km / u.s)
CARTESIAN_POS_AND_VEL = CARTESIAN_POS.with_differentials(CARTESIAN_VEL)
RADIAL_VEL = r.RadialDifferential(1 * u.km / u.s)
SPHERICAL_COS_LAT_VEL = r.SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
SPHERICAL_POS = r.SphericalRepresentation(
lon=1 * u.deg, lat=2.0 * u.deg, distance=10 * u.pc
)
UNIT_SPHERICAL_POS = r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2.0 * u.deg)
CARTESIAN_POS_2D_ARR = r.CartesianRepresentation(np.ones((3, 100)) * u.kpc)
CARTESIAN_POS_3D_ARR = r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)
UNIT_SPHERICAL_COS_LAT_VEL = r.UnitSphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr
)
CARTESIAN_VEL_2D_ARR = r.CartesianDifferential(*np.ones((3, 100)) * u.km / u.s)
CARTESIAN_VEL_3D_ARR = r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km / u.s)
def test_sun():
"""
Test that `get_sun` works and it behaves roughly as it should (in GCRS)
"""
northern_summer_solstice = Time("2010-6-21")
northern_winter_solstice = Time("2010-12-21")
equinox_1 = Time("2010-3-21")
equinox_2 = Time("2010-9-21")
gcrs1 = get_sun(equinox_1)
assert np.abs(gcrs1.dec.deg) < 1
gcrs2 = get_sun(
Time([northern_summer_solstice, equinox_2, northern_winter_solstice])
)
assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5] * u.deg) < 1 * u.deg)
def test_constellations(recwarn):
inuma = ICRS(9 * u.hour, 65 * u.deg)
n_prewarn = len(recwarn)
res = get_constellation(inuma)
res_short = get_constellation(inuma, short_name=True)
    assert len(recwarn) == n_prewarn  # neither version should make warnings
assert res == "Ursa Major"
assert res_short == "UMa"
assert isinstance(res, str) or getattr(res, "shape", None) == tuple()
# these are taken from the ReadMe for Roman 1987
ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
shortnames = ["UMa", "Aqr", "Ori", "Hya", "Com", "Lib", "CrA", "Men"]
testcoos = FK5(ras * u.hour, decs * u.deg, equinox="B1950")
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)
# test on a SkyCoord, *and* test Boötes, which is special in that it has a
# non-ASCII character
boores = get_constellation(SkyCoord(15 * u.hour, 30 * u.deg, frame="icrs"))
assert boores == "Boötes"
assert isinstance(boores, str) or getattr(boores, "shape", None) == tuple()
@pytest.mark.xfail
def test_constellation_edge_cases():
# Test edge cases close to borders, using B1875.0 coordinates
# Look for HMS / DMS roundoff-to-decimal issues from Roman (1987) data,
# and misuse of PrecessedGeocentric, as documented in
# https://github.com/astropy/astropy/issues/9855
# Define eight test points.
# The first four cross the boundary at 06h14m30 == 6.2416666666666... hours
# with Monoceros on the west side of Orion at Dec +3.0.
ras = [6.24100, 6.24160, 6.24166, 6.24171]
# aka ['6h14m27.6s' '6h14m29.76s' '6h14m29.976s' '6h14m30.156s']
decs = [3.0, 3.0, 3.0, 3.0]
# Correct constellations for given RA/Dec coordinates
shortnames = ["Ori", "Ori", "Ori", "Mon"]
# The second four sample northward along RA 22 hours, crossing the boundary
# at 86° 10' == 86.1666... degrees between Cepheus and Ursa Minor
decs += [86.16, 86.1666, 86.16668, 86.1668]
ras += [22.0, 22.0, 22.0, 22.0]
shortnames += ["Cep", "Cep", "Umi", "Umi"]
testcoos = FK5(ras * u.hour, decs * u.deg, equinox="B1875")
npt.assert_equal(
get_constellation(testcoos, short_name=True),
shortnames,
"get_constellation() error: misusing Roman approximations, vs IAU boundaries"
" from Delporte?",
)
# TODO: When that's fixed, add other tests with coords that are in different constellations
# depending on equinox
def test_concatenate():
# Just positions
fk5 = FK5(1 * u.deg, 2 * u.deg)
sc = SkyCoord(3 * u.deg, 4 * u.deg, frame="fk5")
res = concatenate([fk5, sc])
np.testing.assert_allclose(res.ra, [1, 3] * u.deg)
np.testing.assert_allclose(res.dec, [2, 4] * u.deg)
with pytest.raises(TypeError):
concatenate(fk5)
with pytest.raises(TypeError):
concatenate(1 * u.deg)
# positions and velocities
fr = ICRS(
ra=10 * u.deg,
dec=11.0 * u.deg,
pm_ra_cosdec=12 * u.mas / u.yr,
pm_dec=13 * u.mas / u.yr,
)
sc = SkyCoord(
ra=20 * u.deg,
dec=21.0 * u.deg,
pm_ra_cosdec=22 * u.mas / u.yr,
pm_dec=23 * u.mas / u.yr,
)
res = concatenate([fr, sc])
with pytest.raises(ValueError):
concatenate([fr, fk5])
fr2 = ICRS(ra=10 * u.deg, dec=11.0 * u.deg)
with pytest.raises(ValueError):
concatenate([fr, fr2])
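def test_concatenate_keeps_velocities():
    # Illustrative sketch (hedged, not part of the original assertions above):
    # when both inputs carry proper motions, the concatenated SkyCoord is
    # expected to expose the stacked velocity components alongside the
    # stacked positions.
    fr = ICRS(
        ra=10 * u.deg,
        dec=11.0 * u.deg,
        pm_ra_cosdec=12 * u.mas / u.yr,
        pm_dec=13 * u.mas / u.yr,
    )
    sc = SkyCoord(
        ra=20 * u.deg,
        dec=21.0 * u.deg,
        pm_ra_cosdec=22 * u.mas / u.yr,
        pm_dec=23 * u.mas / u.yr,
    )
    res = concatenate([fr, sc])
    np.testing.assert_allclose(res.ra, [10, 20] * u.deg)
    np.testing.assert_allclose(res.pm_ra_cosdec, [12, 22] * u.mas / u.yr)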
@pytest.mark.parametrize(
"rep",
(
CARTESIAN_POS,
SPHERICAL_POS,
UNIT_SPHERICAL_POS,
CARTESIAN_POS_2D_ARR,
CARTESIAN_POS_3D_ARR,
CARTESIAN_POS_AND_VEL,
SPHERICAL_POS.with_differentials(SPHERICAL_COS_LAT_VEL),
UNIT_SPHERICAL_POS.with_differentials(SPHERICAL_COS_LAT_VEL),
UNIT_SPHERICAL_POS.with_differentials(UNIT_SPHERICAL_COS_LAT_VEL),
UNIT_SPHERICAL_POS.with_differentials({"s": RADIAL_VEL}),
CARTESIAN_POS_2D_ARR.with_differentials(CARTESIAN_VEL_2D_ARR),
CARTESIAN_POS_3D_ARR.with_differentials(CARTESIAN_VEL_3D_ARR),
),
)
@pytest.mark.parametrize("n", (2, 4))
def test_concatenate_representations(rep, n):
# Test that combining with itself succeeds
expected_shape = (n * rep.shape[0],) + rep.shape[1:] if rep.shape else (n,)
tmp = concatenate_representations(n * (rep,))
assert tmp.shape == expected_shape
if "s" in rep.differentials:
assert tmp.differentials["s"].shape == expected_shape
def test_concatenate_representations_invalid_input():
# Test that combining pairs fails
with pytest.raises(TypeError):
concatenate_representations((CARTESIAN_POS, SPHERICAL_POS))
with pytest.raises(ValueError):
concatenate_representations((CARTESIAN_POS, CARTESIAN_POS_AND_VEL))
# Check that passing in a single object fails
with pytest.raises(TypeError):
concatenate_representations(CARTESIAN_POS)
def test_concatenate_representations_different_units():
concat = concatenate_representations(
[r.CartesianRepresentation([1, 2, 3] * unit) for unit in (u.pc, u.kpc)]
)
assert np.array_equal(concat.xyz, [[1, 1000], [2, 2000], [3, 3000]] * u.pc)
|
e923722340ccae98b152ac7bf271b8a6bf4ff9026f81b066337d44456c499f95 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.io import registry as io_registry
from astropy.table import Column, MaskedColumn, Table, meta, serialize
from astropy.time import Time
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import BinTableHDU, GroupsHDU, HDUList, TableHDU
from . import append as fits_append
from .column import KEYWORD_NAMES, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import FITS_SIGNATURE
from .hdu.hdulist import fitsopen as fits_open
from .util import first
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = [
"XTENSION",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"PCOUNT",
"GCOUNT",
"TFIELDS",
"THEAP",
]
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = "(" + "|".join(KEYWORD_NAMES) + ")[0-9]+"
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
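# Hedged illustration of the check above (kept as a comment so that module
# behavior is unchanged); the exact set of prefixes comes from KEYWORD_NAMES:
#
#     >>> is_column_keyword("TTYPE1")   # per-column keyword
#     True
#     >>> is_column_keyword("NAXIS1")   # structural keyword, not per-column
#     False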
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
return filepath.lower().endswith(
(".fits", ".fits.gz", ".fit", ".fit.gz", ".fts", ".fts.gz")
)
return isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU))
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta["comments"].index("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
i1 = tbl.meta["comments"].index("--END-ASTROPY-SERIALIZED-COLUMNS--")
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta["comments"][i0 + 1 : i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta["comments"][i0 : i1 + 1]
if not tbl.meta["comments"]:
del tbl.meta["comments"]
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta["__serialized_columns__"] = info["meta"]["__serialized_columns__"]
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info["datatype"]:
for attr in ["description", "meta"]:
if attr in col:
setattr(tbl[col["name"]].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
def read_table_fits(
input,
hdu=None,
astropy_native=False,
memmap=False,
character_as_bytes=True,
unit_parse_strict="warn",
mask_invalid=True,
):
"""
    Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
        When set to `True`, ``mask_invalid`` is set to `False`, since the
        masking would require loading the full data array into memory.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
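    Examples
    --------
    A minimal, hedged sketch of how this reader is usually reached through the
    unified Table I/O interface (the file name below is hypothetical):
    >>> from astropy.table import Table
    >>> t = Table.read('catalog.fits', hdu=1)  # doctest: +SKIP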
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = dict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn(
"hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning,
)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(
f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning,
)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
# using memmap is not compatible with masking invalid value by
# default so we deactivate the masking
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap)
try:
return read_table_fits(
hdulist,
hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
        # Check if the column is masked. Here, we make a guess based on the
        # presence of FITS mask values. For integer columns, this is simply
        # the TNULLn value from the header; for float and complex columns, the
        # presence of NaN; and for string columns, empty strings.
        # Since multi-element columns with dtypes such as '2f8' have a subdtype,
        # we should look up the type of the column on that.
masked = mask = False
coltype = col.dtype.subdtype[0].type if col.dtype.subdtype else col.dtype.type
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b""
if masked or np.any(mask):
column = MaskedColumn(
data=data[col.name], name=col.name, mask=mask, copy=False
)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(
col.unit, format="fits", parse_strict=unit_parse_strict
)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ["COMMENT", "HISTORY"]:
# Convert to io.ascii format
if key == "COMMENT":
key = "comments"
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(
any(
getattr(col.info, attr, None) not in (None, {})
for attr in ("description", "meta")
)
for col in tbl.itercols()
)
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
    # Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as("fits"):
encode_tbl = serialize.represent_mixins_as_columns(tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
    # This re-uses ECSV code that combines the existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = "__serialized_columns__"
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if "comments" not in encode_tbl.meta:
encode_tbl.meta["comments"] = []
encode_tbl.meta["comments"].append("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
for line in meta_yaml_lines:
if len(line) == 0:
lines = [""]
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + "\\" for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta["comments"].extend(lines)
encode_tbl.meta["comments"].append("--END-ASTROPY-SERIALIZED-COLUMNS--")
return encode_tbl
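# Hedged illustration of the continuation scheme used in _encode_mixins above
# (comment only, so module behavior is unchanged): a 100-character YAML line is
# written as two COMMENT cards, the first of which is 71 characters long and
# ends in a backslash::
#
#     line = "x" * 100
#     cards = [line[0:70] + "\\", line[70:100]]
#
# _decode_mixins() detects the 71-character card, drops the trailing backslash,
# and joins the pieces back together when the table is read.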
def write_table_fits(input, output, overwrite=False, append=False):
"""
Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
        Whether to append the table to an existing file.
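    Examples
    --------
    A minimal, hedged sketch via the unified Table I/O interface (the file
    name below is hypothetical):
    >>> from astropy.table import Table
    >>> tbl = Table({'a': [1, 2, 3]})
    >>> tbl.write('table.fits', overwrite=True)  # doctest: +SKIP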
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output)
io_registry.register_reader("fits", Table, read_table_fits)
io_registry.register_writer("fits", Table, write_table_fits)
io_registry.register_identifier("fits", Table, is_fits)
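# The registrations above are what make ``Table.read(..., format="fits")`` and
# ``Table.write(..., format="fits")`` dispatch to the functions in this module.
# When no explicit format is given, ``is_fits`` is used to auto-detect FITS
# input from the signature bytes or the file name extension.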
|
f6323cf4e0ede5bcd1cddd42969de3e5e1fb70ffb5e3c5daebe8e219d2a74d2f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os
import os.path
import textwrap
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from astropy.utils.diff import (
diff_values,
fixed_width_indent,
report_diff_values,
where_not_allclose,
)
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .card import BLANK_CARD, Card
# HDUList is used in one of the doctests
from .hdu.hdulist import HDUList, fitsopen # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from .header import Header
from .util import path_like
__all__ = [
"FITSDiff",
"HDUDiff",
"HeaderDiff",
"ImageDataDiff",
"RawDataDiff",
"TableDataDiff",
]
# Column attributes of interest for comparison
_COL_ATTRS = [
("unit", "units"),
("null", "null values"),
("bscale", "bscales"),
("bzero", "bzeros"),
("disp", "display formats"),
("dim", "dimensions"),
]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
self._diff()
def __bool__(self):
"""
A ``_BaseDiff`` object acts as `True` in a boolean context if the two
objects compared are different. Otherwise it acts as `False`.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
"""
return not any(
getattr(self, attr) for attr in self.__dict__ if attr.startswith("diff_")
)
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like, string, or None, optional
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
The number of 4 space tabs to indent the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Returns
-------
report : str or None
"""
return_string = False
filepath = None
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
if os.path.exists(fileobj) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, "w")
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + "\n")
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
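    A minimal, hedged usage sketch (the file names below are hypothetical):
    >>> from astropy.io import fits
    >>> diff = fits.FITSDiff('old.fits', 'new.fits',
    ...                      ignore_keywords=['DATE'])  # doctest: +SKIP
    >>> print(diff.report())  # doctest: +SKIP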
"""
def __init__(
self,
a,
b,
ignore_hdus=[],
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
if isinstance(a, (str, os.PathLike)):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError(f"error opening file a ({a})") from exc
close_a = True
else:
close_a = False
if isinstance(b, (str, os.PathLike)):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError(f"error opening file b ({b})") from exc
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = {k.upper() for k in ignore_hdus}
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != "*" and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
# Record filenames for use later in _report
self.filenamea = self.a.filename()
if not self.filenamea:
self.filenamea = f"<{self.a.__class__.__name__} object at {id(self.a):#x}>"
self.filenameb = self.b.filename()
if not self.filenameb:
self.filenameb = f"<{self.b.__class__.__name__} object at {id(self.b):#x}>"
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
a_ignored = fnmatch.filter(a_names, pattern)
self.a = HDUList([h for h in self.a if h.name not in a_ignored])
b_ignored = fnmatch.filter(b_names, pattern)
self.b = HDUList([h for h in self.b if h.name not in b_ignored])
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
if (
self.a[idx].name == self.b[idx].name
and self.a[idx].ver == self.b[idx].ver
):
self.diff_hdus.append(
(idx, hdu_diff, self.a[idx].name, self.a[idx].ver)
)
else:
self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
self._fileobj.write("\n")
self._writeln(f" fitsdiff: {__version__}")
self._writeln(f" a: {self.filenamea}\n b: {self.filenameb}")
if self.ignore_hdus:
ignore_hdus = " ".join(sorted(self.ignore_hdus))
self._writeln(" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdus))
if self.ignore_hdu_patterns:
ignore_hdu_patterns = " ".join(sorted(self.ignore_hdu_patterns))
self._writeln(
" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdu_patterns)
)
if self.ignore_keywords:
ignore_keywords = " ".join(sorted(self.ignore_keywords))
self._writeln(
" Keyword(s) not to be compared:\n" + wrapper.fill(ignore_keywords)
)
if self.ignore_comments:
ignore_comments = " ".join(sorted(self.ignore_comments))
self._writeln(
" Keyword(s) whose comments are not to be compared:\n"
+ wrapper.fill(ignore_comments)
)
if self.ignore_fields:
ignore_fields = " ".join(sorted(self.ignore_fields))
self._writeln(
" Table column(s) not to be compared:\n" + wrapper.fill(ignore_fields)
)
self._writeln(
f" Maximum number of different data values to be reported: {self.numdiffs}"
)
self._writeln(
f" Relative tolerance: {self.rtol}, Absolute tolerance: {self.atol}"
)
if self.diff_hdu_count:
self._fileobj.write("\n")
self._writeln("Files contain different numbers of HDUs:")
self._writeln(f" a: {self.diff_hdu_count[0]}")
self._writeln(f" b: {self.diff_hdu_count[1]}")
if not self.diff_hdus:
self._writeln("No differences found between common HDUs.")
return
elif not self.diff_hdus:
self._fileobj.write("\n")
self._writeln("No differences found.")
return
for idx, hdu_diff, extname, extver in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write("\n")
self._writeln("Primary HDU:")
else:
self._fileobj.write("\n")
if extname:
self._writeln(f"Extension HDU {idx} ({extname}, {extver}):")
else:
self._writeln(f"Extension HDU {idx}:")
hdu_diff.report(self._fileobj, indent=self._indent + 1)
class HDUDiff(_BaseDiff):
"""
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
    - ``diff_extvers``: If the two HDUs have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
- ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
      two HDUs. This attribute is always set; whether the headers actually
      differ can be checked through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get("XTENSION") != self.b.header.get("XTENSION"):
self.diff_extension_types = (
self.a.header.get("XTENSION"),
self.b.header.get("XTENSION"),
)
self.diff_headers = HeaderDiff.fromdiff(
self, self.a.header.copy(), self.b.header.copy()
)
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(
" Extension types differ:\n a: {}\n b: {}".format(
*self.diff_extension_types
)
)
if self.diff_extnames:
self._writeln(
" Extension names differ:\n a: {}\n b: {}".format(*self.diff_extnames)
)
if self.diff_extvers:
self._writeln(
" Extension versions differ:\n a: {}\n b: {}".format(
*self.diff_extvers
)
)
if self.diff_extlevels:
self._writeln(
" Extension levels differ:\n a: {}\n b: {}".format(
*self.diff_extlevels
)
)
if not self.diff_headers.identical:
self._fileobj.write("\n")
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write("\n")
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the keywords common to the two
      headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
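    A minimal, hedged usage sketch:
    >>> from astropy.io import fits
    >>> ha = fits.Header([('NAXIS', 2)])
    >>> hb = fits.Header([('NAXIS', 3)])
    >>> fits.HeaderDiff(ha, hb).diff_keyword_values  # doctest: +SKIP
    defaultdict(<class 'list'>, {'NAXIS': [(2, 3)]})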
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : `~astropy.io.fits.Header` or string or bytes
A header.
b : `~astropy.io.fits.Header` or string or bytes
A header to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError(
"HeaderDiff can only diff astropy.io.fits.Header "
"objects or strings containing FITS headers."
)
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
        # Normalize all keywords to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern))
if "*" in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if "*" in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(" Headers have different number of cards:")
self._writeln(f" a: {self.diff_keyword_count[0]}")
self._writeln(f" b: {self.diff_keyword_count[1]}")
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in a: {val!r}")
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in b: {val!r}")
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(f" Inconsistent duplicates of keyword {keyword!r:8}:")
self._writeln(
" Occurs {} time(s) in a, {} times in (b)".format(*count)
)
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(
self._fileobj,
"values",
self.diff_keyword_values,
keyword,
ind=self._indent,
)
report_diff_keyword_attr(
self._fileobj,
"comments",
self.diff_keyword_comments,
keyword,
ind=self._indent,
)
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
[(0, 0), (1.1, 2.2)]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixels, but
        # self.diff_total stores the total count of differences between
        # the images, not the differing values themselves
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (
np.issubdtype(self.a.dtype, np.inexact)
or np.issubdtype(self.b.dtype, np.inexact)
):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [
(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)
]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = " x ".join(str(d) for d in reversed(self.diff_dimensions[0]))
dimsb = " x ".join(str(d) for d in reversed(self.diff_dimensions[1]))
self._writeln(" Data dimensions differ:")
self._writeln(f" a: {dimsa}")
self._writeln(f" b: {dimsb}")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(f" Data differs at {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different pixels found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
        a : ndarray
            The first raw data array (a 1-dimensional array of bytes) to
            compare.
        b : ndarray
            The second raw data array, compared against the first.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (
self.diff_dimensions[0][0],
self.diff_dimensions[1][0],
)
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(" Data sizes differ:")
self._writeln(f" a: {self.diff_dimensions[0]} bytes")
self._writeln(f" b: {self.diff_dimensions[1]} bytes")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f" Data differs at byte {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
self._writeln(" ...")
self._writeln(
" {} different bytes found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
    appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
[('TARGET', 0), ('NGC1001', 'NGC1002')]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
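    A minimal usage sketch (the column name and values are illustrative, and
    ``BinTableHDU.from_columns`` is assumed from the wider `astropy.io.fits`
    API)::
        from astropy.io import fits
        from astropy.io.fits.diff import TableDataDiff
        ca = fits.Column(name='TARGET', format='10A', array=['NGC1001'])
        cb = fits.Column(name='TARGET', format='10A', array=['NGC1002'])
        hdu_a = fits.BinTableHDU.from_columns([ca])
        hdu_b = fits.BinTableHDU.from_columns([cb])
        diff = TableDataDiff(hdu_a.data, hdu_b.data)
        # diff.diff_total == 1 and diff.diff_values records the differing
        # ('TARGET', 0) entry with its two values.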
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
        a : recarray
            The first table data array (e.g. a ``FITS_rec``) to compare.
        b : recarray
            The second table data array, compared against the first.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
# Even if the number of columns are unequal, we still do comparison of
# any common columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if "*" in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(
colsa_set.intersection(colsb_set), key=operator.attrgetter("name")
)
self.common_column_names = {col.name.lower() for col in self.common_columns}
left_only_columns = {
col.name.lower(): col for col in colsa_set.difference(colsb_set)
}
right_only_columns = {
col.name.lower(): col for col in colsb_set.difference(colsa_set)
}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and failing
# that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb))
)
arra = self.a[col.name]
arrb = self.b[col.name]
if np.issubdtype(arra.dtype, np.floating) and np.issubdtype(
arrb.dtype, np.floating
):
diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol)
elif "P" in col.format or "Q" in col.format:
diffs = (
[
idx
for idx in range(len(arra))
if not np.allclose(
arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol
)
],
)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(" Tables have different number of columns:")
self._writeln(f" a: {self.diff_column_count[0]}")
self._writeln(f" b: {self.diff_column_count[1]}")
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in a")
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in b")
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f" Column {name} has different {col_attrs[attr]}:")
report_diff_values(
vals[0],
vals[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_rows:
self._writeln(" Table rows differ:")
self._writeln(f" a: {self.diff_rows[0]}")
self._writeln(f" b: {self.diff_rows[1]}")
self._writeln(" No further data comparison performed.")
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(" Column {} data differs in row {}:".format(*indx))
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(
f" ...{self.diff_total - self.numdiffs} additional difference(s) found."
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different table data element(s) found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
fixed_width_indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n",
ind,
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
A package for reading and writing FITS files and manipulating their
contents.
A module for reading and writing Flexible Image Transport System
(FITS) files. This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
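As a brief, illustrative example (the filename is only a placeholder), a FITS
file can be opened and its HDUs listed with::
    from astropy.io import fits
    with fits.open('example.fits') as hdul:  # placeholder filename
        hdul.info()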
"""
from astropy import config as _config
# Set module-global boolean variables
# TODO: Make it possible to set these variables via environment variables
# again, once support for that is added to Astropy
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.fits`.
"""
enable_record_valued_keyword_cards = _config.ConfigItem(
True,
"If True, enable support for record-valued keywords as described by "
"FITS WCS distortion paper. Otherwise they are treated as normal "
"keywords.",
aliases=["astropy.io.fits.enabled_record_valued_keyword_cards"],
)
extension_name_case_sensitive = _config.ConfigItem(
False,
"If True, extension names (i.e. the ``EXTNAME`` keyword) should be "
"treated as case-sensitive.",
)
strip_header_whitespace = _config.ConfigItem(
True,
"If True, automatically remove trailing whitespace for string values in"
" headers. Otherwise the values are returned verbatim, with all "
"whitespace intact.",
)
use_memmap = _config.ConfigItem(
True,
"If True, use memory-mapped file access to read/write the data in "
"FITS files. This generally provides better performance, especially "
"for large files, but may affect performance in I/O-heavy "
"applications.",
)
lazy_load_hdus = _config.ConfigItem(
True,
"If True, use lazy loading of HDUs when opening FITS files by "
"default; that is fits.open() will only seek for and read HDUs on "
"demand rather than reading all HDUs at once. See the documentation "
"for fits.open() for more details.",
)
enable_uint = _config.ConfigItem(
True,
"If True, default to recognizing the convention for representing "
"unsigned integers in FITS--if an array has BITPIX > 0, BSCALE = 1, "
"and BZERO = 2**BITPIX, represent the data as unsigned integers "
"per this convention.",
)
conf = Conf()
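# For example, any of the configuration items defined above can be overridden
# at runtime with a plain attribute assignment (a brief sketch):
#
#     from astropy.io import fits
#     fits.conf.use_memmap = False  # disable memory-mapped file access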
# Public API compatibility imports
# These need to come after the global config variables, as some of the
# submodules use them
from . import card, column, convenience, hdu
from .card import *
from .column import *
from .convenience import *
from .diff import *
from .fitsrec import FITS_rec, FITS_record
from .hdu import *
from .hdu.compressed import CompImageSection
from .hdu.groups import GroupData
from .hdu.hdulist import fitsopen as open
from .hdu.image import Section
from .header import Header
from .verify import VerifyError
__all__ = (
["Conf", "conf"]
+ card.__all__
+ column.__all__
+ convenience.__all__
+ hdu.__all__
+ [
"FITS_record",
"FITS_rec",
"GroupData",
"open",
"Section",
"Header",
"VerifyError",
"conf",
]
)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import numbers
import operator
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import indent, isiterable, lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .card import CARD_LENGTH, Card
from .util import NotifierMixin, _convert_array, _is_int, cmp, encode_ascii, pairwise
from .verify import VerifyError, VerifyWarning
__all__ = ["Column", "ColDefs", "Delayed"]
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {
"L": "i1",
"B": "u1",
"I": "i2",
"J": "i4",
"K": "i8",
"E": "f4",
"D": "f8",
"C": "c8",
"M": "c16",
"A": "a",
}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS["b1"] = "L"
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS["u2"] = "I"
NUMPY2FITS["u4"] = "J"
NUMPY2FITS["u8"] = "K"
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS["f2"] = "E"
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ["L", "B", "I", "J", "K", "D", "M", "A"]
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {"E": "D", "C": "M"}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {"A": "a", "I": "i4", "J": "i8", "F": "f8", "E": "f8", "D": "f8"}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {"A": "", "I": "d", "J": "d", "F": "f", "E": "E", "D": "E"}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {
"A": (1, 0),
"I": (10, 0),
"J": (15, 0),
"E": (15, 7),
"F": (16, 7),
"D": (25, 17),
}
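# For example, a bare ASCII column format of 'I' (no explicit width) falls
# back to the defaults above and is treated as 'I10': width 10, no decimal
# places.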
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT["F"] = re.compile(
r"(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)|"
)
TDISP_RE_DICT["A"] = TDISP_RE_DICT["L"] = re.compile(
r"(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|"
)
TDISP_RE_DICT["I"] = TDISP_RE_DICT["B"] = TDISP_RE_DICT["O"] = TDISP_RE_DICT[
"Z"
] = re.compile(
r"(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)"
r"(?:\.{0,1}(?P<precision>[0-9]+))?))|"
)
TDISP_RE_DICT["E"] = TDISP_RE_DICT["G"] = TDISP_RE_DICT["D"] = re.compile(
r"(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\."
r"(?P<precision>[0-9]+))+)"
r"(?:E{0,1}(?P<exponential>[0-9]+)?)|"
)
TDISP_RE_DICT["EN"] = TDISP_RE_DICT["ES"] = re.compile(
r"(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)"
)
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering Fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but with a non-zero leading digit)
# E: Float, exponential notation
# Can't get exponential restriction to work without knowing value
# beforehand, so just using width and precision; same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
"I": "{{:{width}d}}",
"B": "{{:{width}b}}",
"O": "{{:{width}o}}",
"Z": "{{:{width}x}}",
"F": "{{:{width}.{precision}f}}",
"G": "{{:{width}.{precision}g}}",
}
TDISP_FMT_DICT["A"] = TDISP_FMT_DICT["L"] = "{{:>{width}}}"
TDISP_FMT_DICT["E"] = TDISP_FMT_DICT["D"] = TDISP_FMT_DICT["EN"] = TDISP_FMT_DICT[
"ES"
] = "{{:{width}.{precision}e}}"
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = (
"TTYPE",
"TFORM",
"TUNIT",
"TNULL",
"TSCAL",
"TZERO",
"TDISP",
"TBCOL",
"TDIM",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
)
KEYWORD_ATTRIBUTES = (
"name",
"format",
"unit",
"null",
"bscale",
"bzero",
"disp",
"start",
"dim",
"coord_type",
"coord_unit",
"coord_ref_point",
"coord_ref_value",
"coord_inc",
"time_ref_pos",
)
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(
r"(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])(?P<option>[!-~]*)", re.I
)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(
r"(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|"
r"(?:(?P<formatf>[FED])"
r"(?:(?P<widthf>[0-9]+)(?:\."
r"(?P<precision>[0-9]+))?)?)"
)
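# For example, the binary-table format '10A' matches TFORMAT_RE with
# repeat='10', format='A' and an empty option, while the ASCII format 'F8.3'
# matches TFORMAT_ASCII_RE with formatf='F', widthf='8', precision='3'.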
TTYPE_RE = re.compile(r"[0-9a-zA-Z_]+")
"""
Regular expression for valid table column names. See FITS Standard v3.0 section 7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r"(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)")
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r"\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*")
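# For example, a TDIMn value of '(10, 20)' matches TDIM_RE with the 'dims'
# group capturing '10, 20', describing a multidimensional cell shape.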
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = "---"
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column. Whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = _parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == "L":
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ("E", "F", "D"):
return f"{self.format}{self.width}.{self.precision}"
return f"{self.format}{self.width}"
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + "u1")
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f"{self.repeat}X"
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = "_" + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify("column_attribute_changed", obj, self._attr[1:], old_value, value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
self,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
array=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Construct a `Column` by specifying attributes. All attributes
except ``format`` can be optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
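        Examples
        --------
        A minimal sketch of constructing columns and assembling them into a
        binary table HDU (``BinTableHDU.from_columns`` is part of the wider
        `astropy.io.fits` API, assumed here for illustration)::
            import numpy as np
            from astropy.io import fits
            c1 = fits.Column(name='TARGET', format='20A',
                             array=['NGC1001', 'NGC1002'])
            c2 = fits.Column(name='FLUX', format='E',
                             array=np.array([1.5, 2.5], dtype=np.float32))
            hdu = fits.BinTableHDU.from_columns([c1, c2])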
"""
if format is None:
raise ValueError("Must specify format to construct Column.")
# any of the input argument (except array) can be a Card or just
# a number/string
kwargs = {"ascii": ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ["The following keyword arguments to Column were invalid:"]
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError("\n".join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs["recformat"]
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
# if the column data is not ndarray, make it to be one, i.e.
# input arrays can be just list or tuple, not required to be ndarray
# does not include Object array because there is no guarantee
# the elements in the object array are consistent.
if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError(
f"Data is inconsistent with the format `{format}`."
)
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ""
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + " = " + repr(value) + "; "
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
# This way each columns .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector,
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if "array" in self.__dict__:
return self.__dict__["array"]
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if "array" in self.__dict__:
del self.__dict__["array"]
return
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
"(got {!r}).".format(name),
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
raise AssertionError(
"Coordinate/axis type must be a string of atmost 8 characters."
)
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]:
if v is not None and v != "":
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null)
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
"binary table columns (got {!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column.".format(null)
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
"table columns of type {!r} (got {!r}). The invalid "
"value will be ignored for the purpose of formatting "
"the data in this column.".format(format, null)
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
"table columns (got {!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column.".format(start)
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
"(got {!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column.".format(start)
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
# ASCII table columns can't have a TDIMn keyword associated with it;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
"columns (got {!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column.".format(dim)
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if isinstance(recformat, _FormatP):
# TDIMs have different meaning for VLA format,
# no warning should be thrown
msg = None
elif reduce(operator.mul, dims_tuple) > format.repeat:
                    msg = (
                        "The repeat count of the column format {!r} for column {!r} "
                        "is less than the number of elements per the TDIM "
                        "argument {!r}. The invalid TDIMn value will be ignored "
                        "for the purpose of formatting this column.".format(
                            format, name, dim
                        )
                    )
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type)
)
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type)
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit)
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got"
" {!r}). The invalid value will be ignored for the purpose of"
" formatting the data in this column.".format(
k, ATTRIBUTE_TO_KEYWORD[k], v
)
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos)
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
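    # Illustrative sketch of what the private helper above returns (kept as a
    # comment so module behaviour is unchanged; the column name and the
    # over-long TCTYPn value are made up):
    #
    #     valid, invalid = Column._verify_keywords(
    #         name="FLUX", format="E", coord_type="A_VERY_LONG_TYPE"
    #     )
    #     # 'coord_type' lands in ``invalid`` as (original value, warning text);
    #     # the recognised options land in ``valid`` and the caller decides
    #     # whether to emit the warnings.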
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
"Columns cannot have both a start (TCOLn) and dim "
"(TDIMn) option, since the former is only applies to "
"ASCII tables, and the latter is only valid for binary tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
            # just 'F' that's not a valid binary format, but it is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
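    # Illustrative calls against the private helper above (kept as comments;
    # the exact format classes returned depend on the conversion machinery, so
    # treat the outcomes as approximate):
    #
    #     Column._guess_format("I10", start=5, dim=None)     # 'start' forces the ASCII guess
    #     Column._guess_format("3J", start=None, dim="(3)")  # 'dim' forces the binary guess
    #     Column._guess_format("F", start=None, dim=None)    # invalid as binary, retried as ASCII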
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims and format.format not in "PQ":
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
                    # The array is uint, has scale == 1.0, and bzero is the
                    # appropriate value for a pseudo-unsigned integer of the
                    # input dtype, so go ahead and treat it as pseudo-unsigned
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
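    # Sketch of the pseudo-unsigned branch above (hypothetical column, shown as
    # a comment): a uint16 array stored in an 'I' column with bzero=2**15 has
    # its numpy format rewritten from 'i2' to 'u2' here and sets
    # ``self._pseudo_unsigned_ints``, e.g.
    #
    #     col = Column(name="CNT", format="I", bzero=2**15,
    #                  array=np.array([0, 65535], dtype=np.uint16))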
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = "\x00"
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if hasattr(input, "_columns_type") and issubclass(input._columns_type, ColDefs):
klass = input._columns_type
elif hasattr(input, "_col_format_cls") and issubclass(
input._col_format_cls, _AsciiColumnFormat
):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .fitsrec import FITS_rec
from .hdu.table import _TableBaseHDU
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (
isinstance(input, FITS_rec)
and hasattr(input, "_coldefs")
and input._coldefs
):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError(
"Input to ColDefs must be a table HDU, a list "
"of Columns, or a record/field array."
)
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f"Element {idx} in the ColDefs input is not a Column.")
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or "A" in format):
if "A" in format:
# should take into account multidimensional items in the column
dimel = int(re.findall("[0-9]+", str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = "(" + ",".join(str(d) for d in dim) + ")"
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == "u":
if "I" in format:
bzero = np.uint16(2**15)
elif "J" in format:
bzero = np.uint32(2**31)
elif "K" in format:
bzero = np.uint64(2**63)
c = Column(
name=cname,
format=format,
array=array.view(np.ndarray)[cname],
bzero=bzero,
dim=dim,
)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr["TFIELDS"]
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group("label")
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group("num"))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == "format":
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f"Invalid keyword for column {idx + 1}: {val[1]}", VerifyWarning
)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs["recformat"]
if "dim" in valid_kwargs:
valid_kwargs["dim"] = kwargs["dim"]
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]["array"] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener to changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if new_column.disp is not None and new_column.disp.upper().startswith("L"):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == "s":
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else "")
return attr
raise AttributeError(name)
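    # A minimal sketch of the pluralised access described above (column names
    # and units are made up for illustration; shown as a comment):
    #
    #     cols = ColDefs([Column(name="TIME", format="D", unit="s"),
    #                     Column(name="FLUX", format="E")])
    #     cols.names  # -> ['TIME', 'FLUX']
    #     cols.units  # -> ['s', ''] (missing values come back as '')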
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim and format_.format not in "PQ":
# Note: VLA array descriptors should not be reshaped
# as they are always of shape (2,)
if format_.format == "A":
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({"names": self.names, "formats": formats, "offsets": offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = "ColDefs("
if hasattr(self, "columns") and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += "\n "
rep += "\n ".join([repr(c) for c in self.columns])
rep += "\n"
rep += ")"
return rep
def __add__(self, other, option="left"):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError("Wrong type of input.")
if option == "left":
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, "right")
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value, new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == "name":
del self.names
elif attr == "format":
del self.formats
self._notify(
"column_attribute_changed", column, idx, attr, old_value, new_value
)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify("column_added", self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
col_name : str or int
The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify("column_removed", self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f"New name {new_name} already exists.")
else:
self.change_attrib(col_name, "name", new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, "unit", new_unit)
def info(self, attrib="all", output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ["all", ""]:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(",")
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == "s":
                lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write(
f"'{attr}' is not an attribute of the column definitions.\n"
)
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + "s")
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["a" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
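    # Worked example of the bookkeeping above (hypothetical two-column ASCII
    # table, shown as a comment): a first column 'I10' with no TBCOLn gets
    # start=1 and span 10; a second column 'F8.3' then gets start=11 and
    # span 8, so ``self._width`` ends up as 18.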
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype="a"):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == "a":
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(f"Inconsistent input data array: {input}")
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Make sure the new item has a consistent data type, to avoid
        misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == "a":
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
nelem = value.shape
len_value = np.prod(nelem)
self.max = max(self.max, len_value)
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
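# Illustration of the matching rules documented above (hypothetical names):
#
#     _get_index(['FLUX', 'flux'], 'FLUX')  # -> 0, exact match wins
#     _get_index(['FLUX', 'TIME'], 'time')  # -> 1, unique case-insensitive match
#     _get_index(['FLUX', 'flux'], 'Flux')  # KeyError: ambiguous key name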
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
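# Round-trip sketch for the two bit-packing helpers above (9 bits packed into
# 2 bytes; the packed values are worked out by hand, so treat them as
# illustrative):
#
#     bits = np.array([[True, False, True] + [False] * 5 + [True]])
#     packed = np.zeros((1, 2), dtype="uint8")
#     _wrapx(bits, packed, 9)                # packed -> [[160, 128]]
#     out = np.zeros((1, 9), dtype=bool)
#     _unwrapx(packed, out, 9)               # out recovers ``bits``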
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "a":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "a":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "a":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
nelem = data_output[idx].shape
descr_output[idx, 0] = np.prod(nelem)
descr_output[idx, 1] = _offset
_offset += descr_output[idx, 0] * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
# TODO: Maybe catch this error use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow..
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
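# For example (binary-table TFORMn values, shown as a comment):
#
#     _parse_tformat("10A")      # -> (10, 'A', '')
#     _parse_tformat("PE(100)")  # -> (1, 'P', 'E(100)')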
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
f"Format {tform!r} not valid--field width must be a positive integeter."
)
if precision >= width:
raise VerifyError(
f"Format {tform!r} not valid--the number of decimal digits "
f"must be less than the format's total width {width}."
)
return format, width, precision
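# For example (ASCII-table TFORMn values):
#
#     _parse_ascii_tformat("I10")    # -> ('I', 10, 0)
#     _parse_ascii_tformat("E12.5")  # -> ('E', 12, 5)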
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
    the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
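# For example, the Fortran-order FITS value '(2,3,4)' comes back as the
# C-order tuple, and unparseable input yields an empty tuple:
#
#     _parse_tdim("(2,3,4)")  # -> (4, 3, 2)
#     _parse_tdim("bogus")    # -> ()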
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "a" and f2[0] == "a":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
        # Use dtype.base and dtype.subdtype (the latter for multi-dimensional items)
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "a":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "a"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "a":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
        # For integers, if the width <= 4 we can safely use 16-bit ints for all
        # values; if width >= 10 we may need to accommodate 64-bit ints
        # (for the non-standard J format code just always force 64-bit).
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
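# For example (width/precision come back as strings from the regex groups, so
# treat the exact values as approximate):
#
#     _parse_tdisp_format("F8.3")   # -> roughly ('F', '8', '3', None)
#     _parse_tdisp_format("E10.4")  # exponential formats also fill the last slot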
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.")
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn a Python format string into a TDISP FITS-compliant format string. Not
    all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
TDISPn FITS Header keyword. Used to specify display formatting.
logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string: str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (ASCII_DEFAULT_WIDTHS[key][0] - ASCII_DEFAULT_WIDTHS[key][1])
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
f"Format {format_string} cannot be mapped to the accepted TDISPn "
"keyword values. Format will not be moved into TDISPn keyword.",
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
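# For example:
#
#     python_to_tdisp("{:8.3f}")  # -> 'F8.3'
#     python_to_tdisp("%10d")     # -> 'I10'
#     python_to_tdisp("{:08d}")   # zero-filled -> warning, returns None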
|
b330b82cd28d03b54cf94e0e2cb2aee5e75ec84b5226ea41ac9b8b3a35d994c8 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import numbers
import os
import re
import warnings
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from ._utils import parse_header
from .card import KEYWORD_LENGTH, UNDEFINED, Card, _pad
from .file import _File
from .util import (
decode_ascii,
encode_ascii,
fileobj_closed,
fileobj_is_binary,
path_like,
)
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(
encode_ascii(r"(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])")
)
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = "END" + " " * 77
_commentary_keywords = Card._commentary_keywords
__doctest_skip__ = [
"Header",
"Header.comments",
"Header.fromtextfile",
"Header.totextfile",
"Header.set",
"Header.update",
]
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
Notes
-----
Although FITS keywords must be exclusively upper case, retrieving an item
in a `Header` object is case insensitive.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : list of `Card`, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return self.__class__(
[copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]
)
elif isinstance(key, str):
key = key.strip()
if key.upper() in _commentary_keywords:
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
if isinstance(value, tuple):
if len(value) > 2:
raise ValueError(
"A Header item may be set with either a scalar value, "
"a 1-tuple containing a scalar value, or a 2-tuple "
"containing a scalar value and comment string."
)
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ""
else:
comment = None
card = None
if isinstance(key, numbers.Integral):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
            # [the solution is not too complicated--it would be to wait until all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
                # if the keyword is not present raise KeyError.
                # To delete a keyword without caring whether it is present,
                # Header.remove(keyword) can be used with the optional
                # argument ignore_missing=True
raise KeyError(f"Keyword '{key}' not found.")
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep="\n", endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
        modified directly without the header containing them otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__["_modified"] = True
return self.__dict__["_modified"]
@_modified.setter
def _modified(self, val):
self.__dict__["_modified"] = val
@classmethod
def fromstring(cls, data, sep=""):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
`Header`
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b"CONTINUE"
END = b"END"
end_card = END_CARD.encode("ascii")
sep = sep.encode("latin1")
empty = b""
else:
CONTINUE = "CONTINUE"
END = "END"
end_card = END_CARD
empty = ""
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep="", endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
`Header`
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
fileobj = os.path.expanduser(fileobj)
if sep:
fileobj = open(fileobj, encoding="latin1")
else:
fileobj = open(fileobj, "rb")
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
header_str = "".join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof, check_block_size=padding)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError("Header missing END card.")
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
        This method can also return a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group("invalid"):
offset = mo.start()
trailing = block[offset + 3 : offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip("ub")
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Unexpected bytes trailing END keyword: {}; these "
"bytes will be replaced with spaces on write.".format(trailing),
AstropyUserWarning,
)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
"Missing padding to end of the FITS block after the "
"END keyword; additional spaces will be appended to "
f"the file upon writing to pad out to {BLOCK_SIZE} bytes.",
AstropyUserWarning,
)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (
block[:offset]
+ encode_ascii(END_CARD)
+ block[offset + len(END_CARD) :]
)
return True, block
return False, block
def tostring(self, sep="", endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
str
A string representing a FITS header.
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[: Card.length])
s = s[Card.length :]
s = sep.join(lines)
if endcard:
s += sep + _pad("END")
if padding:
s += " " * _pad_length(len(s))
return s
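# Illustrative usage sketch (not part of this module; assumes astropy.io.fits
# is importable as ``fits``): tostring() with the defaults always pads the
# result out to a whole 2880-byte FITS block, while a separator such as '\n'
# gives a readable dump without padding.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('SIMPLE', True), ('BITPIX', 8)])
#     >>> len(hdr.tostring())        # two cards + END, padded to one block
#     2880
#     >>> text = hdr.tostring(sep='\n', endcard=False, padding=False)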
def tofile(self, fileobj, sep="", endcard=True, padding=True, overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : path-like or file-like, optional
Either the pathname of a file, or an open file handle or file-like
object.
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode="ostream", overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
"Header size ({}) is not a multiple of block size ({}).".format(
len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE
)
)
fileobj.flush()
fileobj.write(blocks.encode("ascii"))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep="\n", endcard=endcard, padding=False)
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
See Also
--------
tofile
"""
self.tofile(
fileobj, sep="\n", endcard=endcard, padding=False, overwrite=overwrite
)
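# Illustrative usage sketch (not part of this module; the file name is only
# an example): totextfile()/fromtextfile() round-trip a header through a
# newline-separated text representation.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('OBSERVER', 'Edwin Hubble')])
#     >>> hdr.totextfile('header.txt', overwrite=True)
#     >>> fits.Header.fromtextfile('header.txt')['OBSERVER']
#     'Edwin Hubble'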
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
`Header`
A new :class:`Header` instance.
"""
tmp = self.__class__(copy.copy(card) for card in self._cards)
if strip:
tmp.strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
`Header`
A new `Header` instance.
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
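# Illustrative usage sketch (not part of this module): like dict.fromkeys,
# every keyword receives the same default value.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header.fromkeys(['A', 'B', 'C'], 0)
#     >>> list(hdr.keys())
#     ['A', 'B', 'C']
#     >>> hdr['B']
#     0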
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value : str, number, complex, bool, or ``astropy.io.fits.card.Undefined``
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
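# Illustrative usage sketch (not part of this module): get() never raises for
# a missing keyword, returning the supplied default instead.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('NAXIS', 2)])
#     >>> hdr.get('NAXIS')
#     2
#     >>> hdr.get('BUNIT', 'adu')    # keyword absent, default returned
#     'adu'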
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both are specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
# Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (
len(keyword) <= KEYWORD_LENGTH
and Card._keywd_FSC_RE.match(keyword)
and keyword not in self._keyword_indices
):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if new_keyword not in _commentary_keywords and new_keyword in self:
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after, replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before, after=after)
else:
self[keyword] = (value, comment)
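# Illustrative usage sketch (not part of this module): set() updates the
# value and/or comment of a keyword and can position a new card relative to
# an existing one.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('NAXIS', 2), ('EXTEND', True)])
#     >>> hdr.set('NAXIS1', 100, 'length of axis 1', after='NAXIS')
#     >>> hdr.set('NAXIS', 2, 'number of array dimensions')
#     >>> list(hdr.keys())
#     ['NAXIS', 'NAXIS1', 'EXTEND']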
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield card.keyword, None if card.value == UNDEFINED else card.value
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield None if card.value == UNDEFINED else card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
"""
if len(args) > 2:
raise TypeError(f"Header.pop expected at most 2 arguments, got {len(args)}")
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError("Header is empty")
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
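# Illustrative usage sketch (not part of this module): pop() and setdefault()
# mirror their dict counterparts when applied to header keywords.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('A', 1), ('B', 2)])
#     >>> hdr.pop('A')
#     1
#     >>> hdr.pop('MISSING', -1)     # default suppresses the KeyError
#     -1
#     >>> hdr.setdefault('B', 0)     # existing keyword keeps its value
#     2
#     >>> hdr.setdefault('C', 3)     # new keyword is appended and returned
#     3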
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
Arbitrary arguments are also accepted, in which case the update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
"Header update value for key %r is invalid; the "
"value must be either a scalar, a 1-tuple "
"containing the scalar value, or a 2-tuple "
"containing the value and a comment string." % k
)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, "items"):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, "keys"):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
"Header update sequence item #{} is invalid; "
"the item must either be a 2-tuple containing "
"a keyword and value, or a 3-tuple containing "
"a keyword, value, and comment string.".format(idx)
)
if kwargs:
self.update(kwargs)
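# Illustrative usage sketch (not part of this module): update() accepts a
# dict, an iterable of (keyword, value[, comment]) tuples, or keyword
# arguments.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header()
#     >>> hdr.update({'NAXIS1': 100, 'NAXIS2': 100})
#     >>> hdr.update([('BUNIT', 'adu', 'data units')])
#     >>> hdr.update(OBJECT='M31')
#     >>> hdr['NAXIS2'], hdr['OBJECT']
#     (100, 'M31')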
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
Header. In this case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in _commentary_keywords:
while idx >= 0 and self._cards[idx].keyword in _commentary_keywords:
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in _commentary_keywords:
self._keyword_indices[keyword].sort()
# Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
# Don't do this unless there is at least one blank card at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
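# Illustrative usage sketch (not part of this module): by default append()
# keeps trailing commentary cards last; end=True forces the card to the very
# end of the header.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('A', 1)])
#     >>> hdr.add_comment('a trailing comment')
#     >>> hdr.append(('B', 2, 'lands before the COMMENT card'))
#     >>> hdr.append(('C', 3), end=True)
#     >>> list(hdr.keys())
#     ['A', 'B', 'COMMENT', 'C']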
def extend(
self,
cards,
strip=True,
unique=False,
update=False,
update_first=False,
useblanks=True,
bottom=False,
end=False,
):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
"""
temp = self.__class__(cards)
if strip:
temp.strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in _commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
# XTENSION as the case may be, as was the case in
# Header.fromTxtFile
if (keyword == "SIMPLE" and first == "XTENSION") or (
keyword == "XTENSION" and first == "SIMPLE"
):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
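# Illustrative usage sketch (not part of this module): extend() appends many
# cards at once; unique=True skips keywords that already exist, while
# update=True overwrites them instead.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('A', 1), ('B', 2)])
#     >>> hdr.extend([('B', 99), ('C', 3)], unique=True)
#     >>> hdr['B'], hdr['C']
#     (2, 3)
#     >>> hdr.extend([('B', 99)], update=True)
#     >>> hdr['B']
#     99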
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError(f"Keyword {keyword!r} not found.")
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
raise ValueError(f"The keyword {keyword!r} is not in the header.")
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
"""
if not isinstance(key, numbers.Integral):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in _commentary_keywords:
warnings.warn(
"A {!r} keyword already exists in this header. Inserting "
"duplicate keyword.".format(keyword),
AstropyUserWarning,
)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
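# Illustrative usage sketch (not part of this module): insert() places a card
# at an exact position, either by integer index or relative to an existing
# keyword.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('A', 1), ('C', 3)])
#     >>> hdr.insert('C', ('B', 2))              # before keyword 'C'
#     >>> hdr.insert('C', ('D', 4), after=True)  # after keyword 'C'
#     >>> list(hdr.keys())
#     ['A', 'B', 'C', 'D']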
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError(f"Keyword '{keyword}' not found.")
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
"""
old = Card.normalize_keyword(oldkeyword)
new = Card.normalize_keyword(newkeyword)
if new == "CONTINUE":
raise ValueError("Can not rename to CONTINUE")
if new in _commentary_keywords or old in _commentary_keywords:
if not (new in _commentary_keywords and old in _commentary_keywords):
raise ValueError(
"Regular and commentary keys can not be renamed to each other."
)
elif not force and new in self:
raise ValueError(f"Intended keyword {new} already exists in header.")
idx = self.index(old)
card = self._cards[idx]
del self[idx]
self.insert(idx, (new, card.value, card.comment))
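# Illustrative usage sketch (not part of this module): remove() deletes by
# keyword (optionally all duplicates), and rename_keyword() preserves the
# value and comment of the renamed card.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header([('OLDKEY', 1, 'a comment'), ('TMP', 0)])
#     >>> hdr.remove('TMP')
#     >>> hdr.remove('TMP', ignore_missing=True)  # no KeyError
#     >>> hdr.rename_keyword('OLDKEY', 'NEWKEY')
#     >>> hdr['NEWKEY'], hdr.comments['NEWKEY']
#     (1, 'a comment')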
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("HISTORY", value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("COMMENT", value, before=before, after=after)
def add_blank(self, value="", before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary("", value, before=before, after=after)
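# Illustrative usage sketch (not part of this module): the commentary helpers
# append HISTORY/COMMENT/blank cards, whose values can be read back through
# the usual keyword lookup as a list-like object.
#
#     >>> from astropy.io import fits
#     >>> hdr = fits.Header()
#     >>> hdr.add_history('flat-fielded with flat_v2.fits')
#     >>> hdr.add_comment('reduced by the example pipeline')
#     >>> list(hdr['HISTORY'])
#     ['flat-fielded with flat_v2.fits']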
def strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
naxis = self.get("NAXIS", 0)
tfields = self.get("TFIELDS", 0)
for idx in range(naxis):
self.remove("NAXIS" + str(idx + 1), ignore_missing=True)
for name in (
"TFORM",
"TSCAL",
"TZERO",
"TNULL",
"TTYPE",
"TUNIT",
"TDISP",
"TDIM",
"THEAP",
"TBCOL",
):
for idx in range(tfields):
self.remove(name + str(idx + 1), ignore_missing=True)
for name in (
"SIMPLE",
"XTENSION",
"BITPIX",
"NAXIS",
"EXTEND",
"PCOUNT",
"GCOUNT",
"GROUPS",
"BSCALE",
"BZERO",
"TFIELDS",
):
self.remove(name, ignore_missing=True)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.strip().upper()
if keyword.startswith("HIERARCH "):
keyword = keyword[9:]
if keyword not in _commentary_keywords and keyword in self._keyword_indices:
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in _commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, numbers.Integral):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError("Header index out of range.")
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (
len(key) != 2
or not isinstance(key[0], str)
or not isinstance(key[1], numbers.Integral)
):
raise ValueError(
"Tuple indices must be 2-tuples consisting of a "
"keyword string and an integer index."
)
keyword, n = key
else:
raise ValueError(
"Header indices must be either a string, a 2-tuple, or an integer."
)
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or "." in keyword:
raise KeyError(f"Keyword {keyword!r} not found.")
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError(f"Keyword {keyword!r} not found.")
try:
return indices[n]
except IndexError:
raise IndexError(
f"There are only {len(indices)} {keyword!r} cards in the header."
)
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (
isinstance(insertionkey, numbers.Integral)
and insertionkey >= len(self._cards)
):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1:
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in _commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return isinstance(keyword, str) and (
keyword.endswith("...") or "*" in keyword or "?" in keyword
)
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace("*", r".*").replace("?", r".")
pattern = pattern.replace("...", r"\S*") + "$"
match_pattern = re.compile(pattern, re.I).match
return [i for i, card in enumerate(self._cards) if match_pattern(card.keyword)]
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
# The maximum value in each card can be the maximum card length minus
# the maximum key length (which can include spaces if the key length is
# less than 8).
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx : idx + maxlen]))
idx += maxlen
return cards
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before, after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__["_header"]
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError(
f"'{type(obj).__name__}' object has no attribute '_header'"
)
obj.__dict__["_header"] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__["_header"] = val
def __delete__(self, obj):
del obj.__dict__["_header"]
class _BasicHeaderCards:
"""
This class allows access to cards via the _BasicHeader.cards attribute.
This is needed because during HDU class detection, some HDUs use
the .cards interface. Cards cannot be modified here as the _BasicHeader
object will be deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
"""This class provides a fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
# the _BasicHeaderCards object allows access to Card objects from
# keyword indices
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "rb")
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return "\n".join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
return all(a == b for a, b in itertools.zip_longest(self, other))
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return "\n".join(
"{:>{len}} {}".format(c.keyword, c.comment, len=keyword_length)
for c in self._header._cards
)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=""):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return "\n".join(str(x) for x in self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, numbers.Integral):
raise ValueError(f"{self._keyword} index must be an integer")
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
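# Worked example (comment only): with BLOCK_SIZE = 2880 and Card.length = 80
# there are 36 cards per block, so a one-character separator such as '\n'
# adds 35 characters per block:
#
#     _block_size('')   == 2880
#     _block_size('\n') == 2880 + 1 * (36 - 1) == 2915
#
# and _pad_length() gives the bytes needed to reach the next block boundary:
#
#     _pad_length(160)  == 2720
#     _pad_length(2880) == 0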
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == "\0":
if is_eof and header_str.strip("\0") == "":
# TODO: Pass this warning to validation framework
warnings.warn(
"Unexpected extra padding at the end of the file. This "
"padding may not be preserved when saving changes.",
AstropyUserWarning,
)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
"Header block contains null bytes instead of spaces for "
"padding, and is not FITS-compliant. Nulls may be "
"replaced with spaces upon writing.",
AstropyUserWarning,
)
header_str.replace("\0", " ")
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError(f"Header size is not multiple of {BLOCK_SIZE}: {actual_len}")
def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`."""
size = 0
naxis = header.get("NAXIS", 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header["NAXIS" + str(idx + 1)]
bitpix = header["BITPIX"]
gcount = header.get("GCOUNT", 1)
pcount = header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import lazyproperty
from .column import (
_VLF,
ASCII2NUMPY,
ASCII2STR,
ASCIITNULL,
FITS2NUMPY,
ColDefs,
Delayed,
_AsciiColDefs,
_FormatP,
_FormatX,
_get_index,
_makep,
_unwrapx,
_wrapx,
)
from .util import _rstrip_inplace, decode_ascii, encode_ascii
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(
self, input, row=0, start=None, end=None, step=None, base=None, **kwargs
):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop, key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
# Resolve the slice against this record's fields (handling None
# start/stop/step values) and assign each selected field in turn.
for indx in range(*key.indices(len(self))):
indx = self._get_index(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return f"({', '.join(outlist)})"
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[: self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
_load_variable_length_data = True
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data
)
else:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data, strides=input.strides
)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in [
"_converted",
"_heapoffset",
"_heapsize",
"_nfields",
"_gap",
"_uint",
"parnames",
"_coldefs",
]:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
# pickled, so it needs to be deepcopied first
if attrs == "_coldefs":
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, "_heapoffset", 0)
self._heapsize = getattr(obj, "_heapsize", 0)
self._gap = getattr(obj, "_gap", 0)
self._uint = getattr(obj, "_uint", False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If
`False`, copy the data from input, undefined cells will still
be filled with zeros/blanks.
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data, arr.field)
# Reset columns._arrays (which we may want to just do away with
# altogether)
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
# given in the call to this function, whichever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord("F")
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
elif columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints:
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
# Regardless whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ("S", "U"):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (
inarr.dtype.kind == outarr.dtype.kind
and inarr.dtype.kind in ("U", "S")
and inarr.dtype != outarr.dtype
):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
# strings)
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getattribute__(self, attr):
# First, see if ndarray has this attr, and return it if so. Note that
# this means a field with the same name as an ndarray attr cannot be
# accessed by attribute; this is Numpy's default behavior.
# We avoid using np.recarray.__getattribute__ here because after doing
# this check it would access the columns without doing the conversions
# that we need (with .field, see below).
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# attr might still be a fieldname. If we have column definitions,
# we should access this via .field, as the data may have to be scaled.
if self._coldefs is not None and attr in self.columns.names:
return self.field(attr)
# If not, just let the usual np.recarray override deal with it.
return super().__getattribute__(attr)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._uint = self._uint
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
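# Hedged usage sketch (the column name below is hypothetical):
#     row = data[3]        # single FITS_record
#     sub = data[2:5]      # FITS_rec view with its own ColDefs referencing
#                          # sliced column arrays
#     col = data['time']   # equivalent to data.field('time')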
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError(
f"Input tuple or list required to have {self._nfields} elements."
)
else:
raise TypeError(
"Assignment requires a FITS_record, tuple, or list as input."
)
def _ipython_key_completions_(self):
return self.names
def copy(self, order="C"):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""A user-visible accessor for the coldefs."""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__["_coldefs"] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__["_coldefs"]
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, "_coldefs", None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, "_coldefs", None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == "U":
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
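# Hedged illustration: for a hypothetical dtype [('a', 'U8'), ('b', 'f8')]
# the raw FITS itemsize would be 8 + 8 = 16 bytes (the 'U8' field occupies
# 32 bytes in memory but compactifies to 8 ASCII bytes on disk), whereas
# self.itemsize would report 40.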
def field(self, key):
"""
A view of a `Column`'s data as an array.
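Examples
--------
Hedged sketch; ``data`` is assumed to be a `FITS_rec` from a table HDU and
``'flux'`` is a hypothetical column name::
data.field('flux')    # access by column name
data.field(0)         # access by column index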
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
"Field {!r} has a repeat count of 0 in its format code, "
"indicating an empty field.".format(key)
)
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP) and self._load_variable_length_data:
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
Storing such a field would create a reference cycle that cannot be
broken, since ndarrays do not participate in cyclic garbage collection.
"""
base = field
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, "base", None) is not None:
self_base = self_base.base
else:
break
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self._converted[name] = field
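# Hedged illustration: if ``field`` was obtained as a direct view such as
# ``_get_recarray_field(self, idx)[:n]``, walking its .base chain reaches
# ``self`` (or a base shared with self), so it is intentionally not cached;
# the direct view is already backed by our own buffer.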
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = f"_update_column_{attr}"
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
if column.dim:
vla_shape = tuple(
reversed(tuple(map(int, column.dim.strip("()").split(","))))
)
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name)
)
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == "a":
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset : offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset : offset + arr_len].view(dt)
if column.dim and len(vla_shape) > 1:
# The VLA is reshaped consistently with TDIM instructions
if vla_shape[0] == 1:
dummy[idx] = dummy[idx].reshape(1, len(dummy[idx]))
else:
vla_dim = vla_shape[1:]
vla_first = int(len(dummy[idx]) / np.prod(vla_dim))
dummy[idx] = dummy[idx].reshape((vla_first,) + vla_dim)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder(">")
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx], recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode("ascii")
if len(nullval) > format.width:
nullval = nullval[: format.width]
# Before using .replace make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; this causes
# functions like replace to potentially leave gibberish bytes in the
# array buffer.
dummy = np.char.ljust(field, format.width)
dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be converted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b"":
dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
"{}; the header may be missing the necessary TNULL{} "
"keyword or the table contains invalid data".format(exc, indx + 1)
)
return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif len(field.shape) == 1:
# No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems and not isinstance(recformat, _FormatP):
warnings.warn(
"TDIM{} value {:d} does not fit with the size of "
"the array items ({:d}). TDIM{:d} will be ignored.".format(
indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1
)
)
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if _number and (_scale or _zero) and not column._physical_values:
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == "I":
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == "J":
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == "K":
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2**63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == "K":
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{:d}. "
"Returning unscaled data.".format(indx + 1)
)
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord("T"))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim and not isinstance(recformat, _FormatP):
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
Returns the base array of self (the "raw data array"), i.e. the array in
the format in which it was first read from a file, before it was sliced
or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
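# Hedged illustration: for a hypothetical 16-bit column with TFORM='I',
# TZERO=32768 and no TSCAL/TDIM, this would return roughly
# (_str=False, _bool=False, _number=True, _scale=False, _zero=True,
#  bscale=1, bzero=32768, dim=<empty>).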
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [np.prod(arr.shape) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if heapsize >= 2**31:
raise ValueError(
"The heapsize limit for 'P' format has been reached. "
"Please consider using the 'Q' format for your file."
)
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = scale_factors
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
# be marked as not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column, is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {!r} of the column, and the index {} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start,
)
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The index of the "end" of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
f"Column {col_idx + 1} starting point overlaps the previous column."
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of {}.".format(
value, spans[col_idx]
)
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII, a `_UnicodeArrayEncodeError` is raised; this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
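Examples
--------
A minimal sketch (the array contents are purely illustrative)::
out = _ascii_encode(np.array(['abc', 'de'], dtype='U3'))
# ``out`` is an ndarray of dtype 'S3' holding the ASCII-encoded bytes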
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
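Examples
--------
A quick check with hypothetical structured dtypes::
_has_unicode_fields(np.zeros(1, dtype=[('a', 'U4'), ('b', 'f8')]))  # True
_has_unicode_fields(np.zeros(1, dtype=[('a', 'S4'), ('b', 'f8')]))  # False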
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
|
8480d4780df69f378282bda055d506db8fb76a56899962154d55cf50bcbf170a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, MaskedColumn
from astropy.table.column import col_copy
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.utils.exceptions import AstropyUserWarning
from . import Card, Header
# The following is based on the FITS WCS Paper IV, "Representations of time
# coordinates in FITS".
# https://ui.adsabs.harvard.edu/abs/2015A%26A...574A..36R
# The FITS WCS standard specifies the "4-3" form for non-linear coordinate types
TCTYP_RE_TYPE = re.compile(r"(?P<type>[A-Z]+)[-]+")
TCTYP_RE_ALGO = re.compile(r"(?P<algo>[A-Z]+)\s*")
# Time units specified by the FITS Time standard
FITS_TIME_UNIT = ["s", "d", "a", "cy", "min", "h", "yr", "ta", "Ba"]
# Global time reference coordinate keywords
OBSGEO_XYZ = ("OBSGEO-X", "OBSGEO-Y", "OBSGEO-Z")
OBSGEO_LBH = ("OBSGEO-L", "OBSGEO-B", "OBSGEO-H")
TIME_KEYWORDS = (
(
"DATE",
"DATE-AVG",
"DATE-BEG",
"DATE-END",
"DATE-OBS",
"DATEREF",
"JDREF",
"MJD-AVG",
"MJD-BEG",
"MJD-END",
"MJD-OBS",
"MJDREF",
"TIMEOFFS",
"TIMESYS",
"TIMEUNIT",
"TREFDIR",
"TREFPOS",
)
+ OBSGEO_LBH
+ OBSGEO_XYZ
)
# Column-specific time override keywords
COLUMN_TIME_KEYWORDS = ("TCTYP", "TCUNI", "TRPOS")
# Column-specific keywords regex
COLUMN_TIME_KEYWORD_REGEXP = f"({'|'.join(COLUMN_TIME_KEYWORDS)})[0-9]+"
def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
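Examples
--------
Illustrative checks against the column-specific keyword pattern::
is_time_column_keyword('TRPOS12')   # True
is_time_column_keyword('TTYPE1')    # False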
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None
# Set astropy time global information
GLOBAL_TIME_INFO = {
"TIMESYS": ("UTC", "Default time scale"),
"JDREF": (0.0, "Time columns are jd = jd1 + jd2"),
"TREFPOS": ("TOPOCENTER", "Time reference position"),
}
def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info["scale"] = FITS_DEPRECATED_SCALES.get(
global_info["TIMESYS"], global_info["TIMESYS"].lower()
)
# Verify global time scale
if global_info["scale"] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info["scale"] == "gps":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "GPS". In Astropy, "GPS" is a time from epoch format '
"which runs synchronously with TAI; GPS is approximately 19 s "
"ahead of TAI. Hence, this format will be used.",
AstropyUserWarning,
)
# Assume that the values are in GPS format
global_info["scale"] = "tai"
global_info["format"] = "gps"
if global_info["scale"] == "local":
warnings.warn(
"Global time scale (TIMESYS) has a FITS recognized time scale "
'value "LOCAL". However, the standard states that "LOCAL" should be '
"tied to one of the existing scales because it is intrinsically "
"unreliable and/or ill-defined. Astropy will thus use the default "
'global time scale "UTC" instead of "LOCAL".',
AstropyUserWarning,
)
# Default scale 'UTC'
global_info["scale"] = "utc"
global_info["format"] = None
else:
raise AssertionError(
"Global time scale (TIMESYS) should have a FITS recognized "
"time scale value (got {!r}). The FITS standard states that "
"the use of local time scales should be restricted to alternate "
"coordinates.".format(global_info["TIMESYS"])
)
else:
# Scale is already set
global_info["format"] = None
# Check if geocentric global location is specified
obs_geo = [global_info[attr] for attr in OBSGEO_XYZ if attr in global_info]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
"The geocentric observatory location {} is not completely "
"specified (X, Y, Z) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
# Check geodetic location
obs_geo = [global_info[attr] for attr in OBSGEO_LBH if attr in global_info]
if len(obs_geo) == 3:
global_info["location"] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
"The geodetic observatory location {} is not completely "
"specified (lon, lat, alt) and will be ignored.".format(obs_geo),
AstropyUserWarning,
)
global_info["location"] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (("MJDREF", "mjd"), ("JDREF", "jd"), ("DATEREF", "fits")):
if key in global_info:
global_info["ref_time"] = {"val": global_info[key], "format": format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info["ref_time"] = {"val": 0, "format": "mjd"}
def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get("TCTYP", None)
unit = column_info.get("TCUNI", None)
location = column_info.get("TRPOS", None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info["scale"] = scale.lower()
column_info["format"] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info["scale"] = FITS_DEPRECATED_SCALES[scale]
column_info["format"] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == "TIME":
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
elif scale == "GPS":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "GPS". '
'In Astropy, "GPS" is a time from epoch format which runs '
"synchronously with TAI; GPS runs ahead of TAI approximately "
"by 19 s. Hence, this format will be used.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = "tai"
column_info["format"] = "gps"
elif scale == "LOCAL":
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "LOCAL". '
'However, the standard states that "LOCAL" should be tied to one '
"of the existing scales because it is intrinsically unreliable "
"and/or ill-defined. Astropy will thus use the global time scale "
"(TIMESYS) as the default.".format(column_info),
AstropyUserWarning,
)
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info["scale"] = global_info["scale"]
column_info["format"] = global_info["format"]
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
"However, the observatory position is not properly specified. "
"The FITS standard does not support this and hence reference "
"position will be ignored.",
AstropyUserWarning,
)
else:
column_info["location"] = None
# Warn user about ignoring global reference position when TRPOSn is
# not specified
elif global_info["TREFPOS"] == "TOPOCENTER":
if global_info["location"] is not None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", and the observatory position '
"has been specified. However, for supporting column-specific location, "
"reference position will be ignored for this column.",
AstropyUserWarning,
)
column_info["location"] = None
else:
column_info["location"] = None
# Get reference time
column_info["ref_time"] = global_info["ref_time"]
return True
def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == "TIME" and col.info.unit in FITS_TIME_UNIT:
column_info = {
"scale": global_info["scale"],
"format": global_info["format"],
"ref_time": global_info["ref_time"],
"location": None,
}
if global_info["TREFPOS"] == "TOPOCENTER":
column_info["location"] = global_info["location"]
if column_info["location"] is None:
warnings.warn(
'Time column "{}" reference position will be ignored '
"due to unspecified observatory position.".format(col.info.name),
AstropyUserWarning,
)
return column_info
return None
def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key, value in global_info.items():
# FITS uses a subset of ISO-8601 for DATE-xxx
if key not in table.meta:
try:
table.meta[key] = _convert_time_key(global_info, key)
except ValueError:
pass
def _convert_time_key(global_info, key):
"""
Convert a time metadata key to a Time object.
Parameters
----------
global_info : dict
Global time reference frame information.
key : str
Time key.
Returns
-------
astropy.time.Time
Raises
------
ValueError
If key is not a valid global time keyword.
"""
value = global_info[key]
if key.startswith("DATE"):
scale = "utc" if key == "DATE" else global_info["scale"]
precision = len(value.split(".")[-1]) if "." in value else 0
return Time(value, format="fits", scale=scale, precision=precision)
# MJD-xxx in MJD according to TIMESYS
elif key.startswith("MJD-"):
return Time(value, format="mjd", scale=global_info["scale"])
else:
raise ValueError("Key is not a valid global time keyword")
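# Hedged sketch (values are hypothetical): with
#     global_info = {"scale": "tt", "MJD-OBS": 58849.0}
# _convert_time_key(global_info, "MJD-OBS") returns
# Time(58849.0, format="mjd", scale="tt"), whereas the plain "DATE" keyword is
# always parsed on the UTC scale with format "fits".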
def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ["S", "U"]:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(
col,
format="fits",
scale=column_info["scale"],
precision=precision,
location=column_info["location"],
)
if column_info["format"] == "gps":
return Time(col, format="gps", location=column_info["location"])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if column_info["ref_time"]["val"] == 0 and column_info["ref_time"][
"format"
] in ["jd", "mjd"]:
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(
col[..., 0],
col[..., 1],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
else:
return Time(
col,
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Reference time
ref_time = Time(
column_info["ref_time"]["val"],
scale=column_info["scale"],
format=column_info["ref_time"]["format"],
location=column_info["location"],
)
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
'The exception "{}" was encountered while trying to convert the time '
'column "{}" to Astropy Time.'.format(err, col.info.name),
AstropyUserWarning,
)
return col
def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {"TIMESYS": "UTC", "TREFPOS": "TOPOCENTER"}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
# may get modified. The data is still a "view" (for now).
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r"([A-Z]+)([0-9]+)", key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif value in OBSGEO_XYZ and re.match("TTYPE[0-9]+", key):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname], column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy
def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
jd12 = np.stack([col.jd1, col.jd2], axis=-1)
# jd1 and jd2 have been stacked along a new trailing axis, so each element
# of the replacement column is a (jd1, jd2) pair.
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if col.location is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
'Time Column "{}" has no specified location, but global Time '
"Position is present, which will be the default for this column "
"in FITS specification.".format(col.info.name),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
'Earth Location "TOPOCENTER" for Time Column "{}" is incompatible '
'with scale "{}".'.format(col.info.name, col.scale.upper()),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
"observatory locations ({}, {}) encountered."
"This is not supported by the FITS standard.".format(
location, col.location
)
)
return newtable, hdr
|
57b20618e41427ca256b07499a4d3a1e4512f02a92df4cb38dc0dc20e2526557 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
"""Convenience functions for working with FITS files.
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata` support
special arguments for selecting which HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the HDU. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
HDU arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an extension
HDU rather than the keyword value.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import HDUList, fitsopen
from .hdu.image import ImageHDU, PrimaryHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import (
_is_dask_array,
_is_int,
fileobj_closed,
fileobj_mode,
fileobj_name,
path_like,
)
__all__ = [
"getheader",
"getdata",
"getval",
"setval",
"delval",
"writeto",
"append",
"update",
"info",
"tabledump",
"tableload",
"table_to_hdu",
"printdiff",
]
def getheader(filename, *args, **kwargs):
"""
Get the header from an HDU of a FITS file.
Parameters
----------
filename : path-like or file-like
File to get header from. If an opened file object, its mode
must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for HDU specification. See the
`getdata` documentation for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
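Examples
--------
Hedged sketch; ``'in.fits'`` is a hypothetical file::
getheader('in.fits')            # header of the primary HDU
getheader('in.fits', ext=2)     # header of the second extension HDU
getheader('in.fits', 'sci', 2)  # header of EXTNAME='SCI', EXTVER=2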
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
def getdata(filename, *args, header=None, lower=None, upper=None, view=None, **kwargs):
"""
Get the data from an HDU of a FITS file (and optionally the
header).
Parameters
----------
filename : path-like or file-like
File to get data from. If opened, mode must be one of the
following: rb, rb+, or ab+.
ext
The rest of the arguments are for HDU specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary HDU::
getdata('in.fits')
.. note::
Exclusive to ``getdata``: if ``ext`` is not specified
and the primary HDU contains no data, ``getdata`` attempts
to retrieve data from the first extension HDU.
By HDU number::
getdata('in.fits', 0) # the primary HDU
getdata('in.fits', 2) # the second extension HDU
getdata('in.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note that ``EXTNAME`` values are not case-sensitive.
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : ndarray or `~numpy.recarray` or `~astropy.io.fits.Group`
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
Raises
------
IndexError
If no data is found in searched HDUs.
"""
mode, closed = _get_file_mode(filename)
ext = kwargs.get("ext")
extname = kwargs.get("extname")
extver = kwargs.get("extver")
ext_given = not (
len(args) == 0 and ext is None and extname is None and extver is None
)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None:
if ext_given:
raise IndexError(f"No data in HDU #{extidx}.")
# fallback to the first extension HDU
if len(hdulist) == 1:
raise IndexError("No data in Primary HDU and no extension HDU found.")
hdu = hdulist[1]
data = hdu.data
if data is None:
raise IndexError("No data in either Primary or first extension HDUs.")
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller("lower")
elif upper:
trans = operator.methodcaller("upper")
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == "":
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
Name of the FITS file, or file object (if opened, mode must be
one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data=True`` when opening the file so that values
        can be retrieved from the unmodified header.
Returns
-------
keyword value : str, int, or float
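    Examples
    --------
    Illustrative calls; the file name is hypothetical::
        getval('in.fits', 'NAXIS')         # from the primary HDU
        getval('in.fits', 'NAXIS', ext=2)  # from the second extension HDU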
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
def setval(
filename,
keyword,
*args,
value=None,
comment=None,
before=None,
after=None,
savecomment=False,
**kwargs,
):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value and/or comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data=True`` when opening the file so that values
        can be retrieved from the unmodified header.
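    Examples
    --------
    Illustrative calls; the file name and keyword value are hypothetical::
        setval('in.fits', 'OBSERVER', value='Edwin Hubble')         # primary HDU
        setval('in.fits', 'OBSERVER', value='Edwin Hubble', ext=2)  # extension 2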
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : path-like or file-like
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for HDU specification.
See `getdata` for explanations/examples.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
        *Note:* This function automatically specifies
        ``do_not_scale_image_data=True`` when opening the file so that values
        can be retrieved from the unmodified header.
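    Examples
    --------
    Illustrative calls; the file name is hypothetical::
        delval('in.fits', 'OBSERVER')         # from the primary HDU
        delval('in.fits', 'OBSERVER', ext=2)  # from the second extension HDU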
"""
if "do_not_scale_image_data" not in kwargs:
kwargs["do_not_scale_image_data"] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, "update", *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
def writeto(
filename,
data,
header=None,
output_verify="exception",
overwrite=False,
checksum=False,
):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened in a writable binary
mode such as 'wb' or 'ab+'.
data : array or `~numpy.recarray` or `~astropy.io.fits.Group`
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See
:ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
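    Examples
    --------
    A minimal sketch; the output path is hypothetical::
        import numpy as np
        writeto('out.fits', np.zeros((10, 10)), overwrite=True)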
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(
filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum
)
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
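    Examples
    --------
    A minimal sketch; the column name, values, and output path are illustrative::
        from astropy.table import Table
        tab = Table({'wavelength': [1.0, 2.0, 3.0]})
        hdu = table_to_hdu(tab)
        hdu.writeto('table.fits', overwrite=True)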
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
int_formats = ("B", "I", "J", "K")
if not (col.format in int_formats or col.format.p_format in int_formats):
continue
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
col.time_ref_pos = trpos
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : path-like or file-like
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, :class:`~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for appending.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
**kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
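    Examples
    --------
    A minimal sketch; the file name is hypothetical. The call creates the file
    if it does not exist, and otherwise appends an image extension::
        import numpy as np
        append('out.fits', np.ones((5, 5)))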
"""
if isinstance(filename, path_like):
filename = os.path.expanduser(filename)
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
        # The input file or file-like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode="append", **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode="append")
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
def update(filename, data, *args, **kwargs):
"""
Update the specified HDU with the input data/header.
Parameters
----------
filename : path-like or file-like
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, `~astropy.table.Table`, or `~astropy.io.fits.Group`
The new data used for updating.
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
HDU specification(s). Header and HDU specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension HDU
update(file, dat, hdr, 3) # update the 3rd extension HDU
update(file, dat, 'sci', 2) # update the 2nd extension HDU named 'sci'
update(file, dat, 3, header=hdr) # update the 3rd extension HDU
update(file, dat, header=hdr, ext=5) # update the 5th extension HDU
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop("header", header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, "update", *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each HDU.
Parameters
----------
filename : path-like or file-like
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
**kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
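    Examples
    --------
    Illustrative calls; the file name is hypothetical::
        info('in.fits')                           # print a summary to sys.stdout
        summary = info('in.fits', output=False)   # return the summary instead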
"""
mode, closed = _get_file_mode(filename, default="readonly")
# Set the default value for the ignore_missing_end parameter
if "ignore_missing_end" not in kwargs:
kwargs["ignore_missing_end"] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
        By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
        not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
    The primary use of the `printdiff` function is to allow a quick printout
    of a FITS difference report; the report is written to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
        # Use the handy _getext to interpret any ext keywords, but we
        # will need to close hdulista ourselves if opening inputb fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1, overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : path-like or file-like
Input fits file.
datafile : path-like or file-like, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
    The primary use of the `tabledump` function is to allow the table data
    and parameters to be edited in a standard text editor. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
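    Examples
    --------
    A minimal sketch; the file names are hypothetical::
        tabledump('in.fits', 'table_data.txt', 'table_cols.txt', ext=1)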
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default="readonly")
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + "_" + repr(ext) + ".txt"
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
    The primary use of the `tableload` function is to allow the input of table
    data and parameters that were edited in a standard text editor. The
    `tabledump` function can be used to create the
initial ASCII files.
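    Examples
    --------
    A minimal sketch; the input files are hypothetical and would typically be
    produced by an earlier `tabledump` call that also wrote a column
    definitions file::
        hdu = tableload('table_data.txt', 'table_cols.txt')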
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace("\n", "\n ")
def _getext(filename, mode, *args, ext=None, extname=None, extver=None, **kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
    err_msg = "Redundant/conflicting extension argument(s): {}".format(
{"args": args, "ext": ext, "extname": extname, "extver": extver}
)
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError("Too many positional arguments.")
if ext is not None and not (
_is_int(ext)
or (
isinstance(ext, tuple)
and len(ext) == 2
and isinstance(ext[0], str)
and _is_int(ext[1])
)
):
raise ValueError(
"The ext keyword must be either an extension number "
"(zero-indexed) or a (extname, extver) tuple."
)
if extname is not None and not isinstance(extname, str):
raise ValueError("The extname argument must be a string.")
if extver is not None and not _is_int(extver):
raise ValueError("The extver argument must be an integer.")
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError("extver alone cannot specify an extension.")
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU._from_data(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if (
isinstance(data, np.ndarray) and data.dtype.fields is not None
) or isinstance(data, np.recarray):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray) or _is_dask_array(data):
hdu = ImageHDU(data, header=header)
else:
raise KeyError("Data must be a numpy array.")
return hdu
def _stat_filename_or_fileobj(filename):
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ""
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = (
name and (not os.path.exists(name) or (os.path.getsize(name) == 0))
) or (not name and loc == 0)
return name, closed, noexist_or_empty
def _get_file_mode(filename, default="readonly"):
"""
    Allow a file object to already be opened in any of the valid modes and
    leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode)
)
return mode, closed
|
ad9eea3b355991575e06f79d60dc232d41af657437bbb938b10430cbad6a783a | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import itertools
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, bytes, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f"_update_{notification}"
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state["_listeners"] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Examples
--------
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
f"KeyboardInterrupt ignored until {func.__name__} is complete!",
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
if sys.version_info[:2] >= (3, 10):
from itertools import pairwise
else:
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
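        Examples
        --------
        A small illustrative example:
        >>> list(pairwise('abc'))
        [('a', 'b'), ('b', 'c')]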
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
            # StopIteration if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
if isinstance(s, str):
return s.encode("ascii")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_):
ns = np.char.encode(s, "ascii").view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize / 4))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_):
raise TypeError("string operation on non-string array")
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode("ascii")
except UnicodeDecodeError:
warnings.warn(
"non-ASCII characters are present in the FITS "
'file header and have been replaced by "?" characters',
AstropyUserWarning,
)
s = s.decode("ascii", errors="replace")
return s.replace("\ufffd", "?")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
# array : https://github.com/numpy/numpy/issues/13156
dt = s.dtype.str.replace("S", "U")
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, "ascii").view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError("string operation on non-string array")
return s
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(line) < width for line in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
        # copy is needed because the array is constructed on top of the
        # read-only bytes buffer returned by read()
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2**32) - 1
_WIN_WRITE_LIMIT = (2**31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised,
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
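    Examples
    --------
    Illustrative values:
    >>> _pseudo_zero(np.dtype('uint16'))
    32768
    >>> _pseudo_zero(np.dtype('int8'))
    -128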
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_integer(dtype):
return (dtype.kind == "u" and dtype.itemsize >= 2) or (
dtype.kind == "i" and dtype.itemsize == 1
)
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
    Split a long string into parts where each part is no longer than ``width``
    and no word is cut into two pieces. But if there are any single words
    which are longer than ``width``, then they will be split in the middle of
    the word.
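    Examples
    --------
    A short illustrative example:
    >>> _words_group('The quick brown fox jumps', 10)
    ['The quick ', 'brown fox ', 'jumps']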
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
    directory of the input file as the directory for the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ""
if not isinstance(hdulist, list):
hdulist = [hdulist]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = (
"Not enough space on disk: requested {}, available {}. ".format(
hdulist_size, free_space
)
)
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
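    Examples
    --------
    A couple of illustrative cases:
    >>> _extract_number('3.0', 0)
    3
    >>> _extract_number('not-a-number', 42)
    42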
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy")
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the strings to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
62958392a71425d478de5ce37c93568ab9a50cda9ef33fabe3892be74d1fe9c8 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
    # NFSC allows a lower case 'd' or 'e' for the exponent, and allows spaces
    # between the sign, digits, exponent sign, and exponent
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
    # comment may be an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
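# Illustrative sketch (not part of the original source): for the example card
# value quoted above, the combined regex yields
#
#     m = _rvkc_keyword_val_comm_RE.match("'AXIS.1: 1' / a comment")
#     m.group("keyword")  # -> "AXIS.1"  (the field-specifier)
#     m.group("val")      # -> "1"
#     m.group("comm")     # -> "a comment"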
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
f"Floating point {value!r} values are not allowed in FITS headers."
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
f"ASCII characters; {comment!r} contains characters not "
"representable in ASCII or non-printable characters."
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
# Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued keyword cards by "
"setting the field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword need also be updated
keyword = self._keyword.split(".", 1)[0]
self._keyword = ".".join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
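Examples
--------
Illustrative usage (a sketch, not part of the original docstring)::
    >>> card = Card.fromstring("SIMPLE  =                    T / conforms to FITS")
    >>> card.keyword, card.value
    ('SIMPLE', True)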
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
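# Illustrative sketch (not part of the original source), assuming the all-caps
# standard keyword regex defined on this class:
#
#     Card.normalize_keyword("naxis")       # -> "NAXIS"
#     Card.normalize_keyword("dp1.axis.1")  # -> "DP1.axis.1" (field-specifier case kept)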
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
intended as a RVKC), and the second as the card value OR the first value
can be the base keyword, and the second value the 'field-specifier:
value' string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = ".".join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
f"Unparsable card ({self.keyword}), fix it first with .verify('fix')."
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip().replace("''", "'")
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = ".".join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
keyword = self.keyword.split(".", 1)[0]
return "{:{len}}".format(keyword, len=KEYWORD_LENGTH)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = "".join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
# guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
f"The header keyword {self.keyword!r} with its value is too long"
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
will render the card as multiple consecutive commentary card of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
dict(
err_text=(
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
dict(
err_text=f"Card keyword {keyword!r} is not upper case.",
fix_text=fix_text,
fix=self._fix_keyword,
)
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False)
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
dict(
err_text=(
f"Unprintable string {valuecomment!r}; commentary "
"cards may only contain printable ASCII characters"
),
fixable=False,
)
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
dict(
err_text=(
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
dict(
err_text=(
f"Unprintable string {comment!r}; header comments "
"may only contain printable ASCII characters"
),
fixable=False,
)
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
def _int_or_float(s):
"""
Convert a string to an int if possible, otherwise to a float.
If the string cannot be converted to an int or a float, a ValueError is raised.
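Illustrative examples (not part of the original docstring)::
    _int_or_float("42")     # -> 42
    _int_or_float("4.5e1")  # -> 45.0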
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
# a string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it is at most 20 characters."""
value_str = str(value).replace("e", "E")
# Limit the value string to at most 20 characters.
if (str_len := len(value_str)) > 20:
idx = value_str.find("E")
if idx < 0:
# No scientific notation, truncate decimal places
value_str = value_str[:20]
else:
# Scientific notation, truncate significand (mantissa)
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import fnmatch
import functools
import inspect
import itertools
import operator
import os
import re
import warnings
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``.
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(
f"column(s) with dimension > {max_ndim} "
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format"
)
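# Illustrative sketch (not part of the original source); ``tbl`` is a
# hypothetical astropy Table with one column whose rows are themselves 1-d
# arrays (so that column's ``shape`` has length 2):
#
#     _check_multidim_table(tbl, max_ndim=1)     # raises the ValueError above
#     _check_multidim_table(tbl, max_ndim=2)     # accepted (e.g. HTML)
#     _check_multidim_table(tbl, max_ndim=None)  # no limit, always accepted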
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = "2b=48Av%0-V3p>bX"
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (
dialect.delimiter == " "
)
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
# If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked.
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
"""All instances of this class shall have the same hash."""
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""Get the lines from the ``table`` input.
The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline : str, optional
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
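Examples
--------
Illustrative usage (a sketch, not part of the original docstring)::
    >>> BaseInputter().get_lines("a b c\n1 2 3")
    ['a b c', '1 2 3']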
"""
try:
if hasattr(table, "read") or (
"\n" not in table + "" and "\r" not in table + ""
):
with get_readable_fileobj(table, encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable'
)
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines.
"""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
"""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = " "
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first.
"""
if self.delimiter == r"\s":
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + "\n"
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(" \t")
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = " " if self.delimiter == r"\s" else self.delimiter
csv_reader = csv.reader(
lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace,
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = " " if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip("\r\n")
return out
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings.
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = "NONE"
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, "__call__"):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
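# Illustrative sketch (not part of the original source); ``lines`` stands for
# any list of table lines:
#
#     _get_line_index(1, lines)                # -> 1
#     _get_line_index(-1, ["a", "b", "c"])     # -> 2 (counted from the end)
#     _get_line_index(None, lines)             # -> None
#     _get_line_index(lambda lines: 0, lines)  # -> 0 (callable is applied to lines)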
class BaseHeader:
"""
Base table header reader.
"""
auto_format = "col{}"
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [
re.sub("^" + self.comment, "", x).strip() for x in comment_lines
]
if comment_lines:
meta.setdefault("table", {})["comments"] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError("No header line found in table")
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines."""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get("comments", []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(
range(self.start_line), itertools.cycle(self.write_spacer_lines)
):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table."""
return tuple(
col.name if isinstance(col, Column) else col.info.name for col in self.cols
)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f"got column type {type(col)} instead of required {Column}")
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError(
f'Unknown data type ""{col.raw_type}"" for column "{col.name}"'
)
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (
_is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise InconsistentTableError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if (
guessing
and len(self.colnames) <= 1
and self.__class__.__name__ != "EcsvHeader"
):
raise ValueError(
"Table format guessing requires at least two columns, got {}".format(
list(self.colnames)
)
)
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
"Length of names argument ({}) does not match number"
" of table columns ({})".format(len(names), len(self.colnames))
)
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, "")]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``.
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""
READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line.
"""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value.
In the first step it is determined which value from ``fill_values`` applies
to which column, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData.
``fill_values`` has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
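Illustrative sketch (not part of the original docstring; ``data`` here stands
for a ``BaseData`` instance)::
    # replace "N/A" by "0" in column "col1" only, and "" by "-999" in all
    # columns selected via fill_include_names / fill_exclude_names
    data.fill_values = [("N/A", "0", "col1"), ("", "-999")]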
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, "fill_values"):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ""
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError(
"Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)"
)
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in (
(i, x)
for i, x in enumerate(self.header.colnames)
if x in affect_cols
):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, "mask"):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings.
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if hasattr(self.start_line, "__call__"):
raise TypeError("Start_line attribute cannot be callable for write()")
else:
data_start_line = self.start_line or 0
while len(lines) < data_start_line:
lines.append(itertools.cycle(self.write_spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if "int" in type_name:
converter_type = IntType
elif "float" in type_name:
converter_type = FloatType
elif "bool" in type_name:
converter_type = BoolType
elif "str" in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all(
(svals == "False") | (svals == "True") | (svals == "0") | (svals == "1")
):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == "True") | (vals == "1")
falses = (vals == "False") | (vals == "0")
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
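# Illustrative usage of convert_numpy (a sketch; the exact array repr depends
# on the platform default integer width):
# >>> converter, ctype = convert_numpy(numpy.int64)
# >>> converter(["1", "2", "3"])
# array([1, 2, 3])
# >>> ctype is IntType
# True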
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
    # User-defined converters which get set in ascii.ui if a ``converters``
    # kwarg is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type).
"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith("cannot unpack"):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError("converter_type must be a subclass of NoType")
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError(
"Error: invalid format for converters, see "
f"documentation\n{converters}: {err}"
)
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
            # value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = "no converters defined"
while not hasattr(col, "data"):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f"Column {col.name} failed to convert: {last_err}")
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError("converter type does not match column type")
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (OverflowError, TypeError, ValueError) as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
# With python/cpython#95778 this has been supplemented with a
# "ValueError: Exceeds the limit (4300) for integer string conversion"
# so need to catch that as well.
if isinstance(err, OverflowError) or (
isinstance(err, ValueError)
and str(err).startswith("Exceeds the limit")
):
warnings.warn(
f"OverflowError converting to {converter_type.__name__} in"
f" column {col.name}, reverting to String.",
AstropyWarning,
)
col.converters.insert(0, convert_numpy(str))
else:
col.converters.pop(0)
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``.
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + "_"
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
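# Illustrative behavior of _deduplicate_names (a sketch):
# >>> _deduplicate_names(["a", "b", "a", "a"])
# ['a', 'b', 'a_1', 'a_2']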
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int), convert_numpy(float), convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [
numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, "mask") and numpy.any(x.mask)
else x.data
for x in cols
]
out = Table(t_cols, names=[x.name for x in cols], meta=meta["table"])
for col, out_col in zip(cols, out.columns.values()):
for attr in ("format", "unit", "description"):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, "meta"):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get("_format_name")
if format is None:
return
fast = dct.get("_fast")
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ["ascii." + format] + dct.get("_io_registry_format_aliases", [])
if dct.get("_io_registry_suffix"):
func = functools.partial(connect.io_identify, dct["_io_registry_suffix"])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(READ_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get("_io_registry_can_write", True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(WRITE_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = "x" * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
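# Illustrative effect of _apply_include_exclude_names (a sketch; "x", "y", "z"
# are hypothetical column names):
#   names=["x", "y", "z"]      renames the existing columns in order
#   include_names=["x", "y"]   keeps only x and y
#   exclude_names=["y"]        then drops y, leaving only x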
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
        # need to know about the header (e.g. for fixed-width tables where widths are spec'd in the header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(), cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == "\n":
newline = "\r"
elif self.header.splitter.delimiter == "\r":
newline = "\n"
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
        # Set self.data.data_lines to a slice of lines containing the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = (
"Number of header columns ({}) inconsistent with"
" data columns ({}) at data line {}\n"
"Header values: {}\n"
"Data values: {}".format(
n_cols, len(str_vals), i, [x.name for x in cols], str_vals
)
)
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, "table_meta"):
self.meta["table"].update(self.header.table_meta)
_apply_include_exclude_names(
self.header, self.names, self.include_names, self.exclude_names
)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
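    # Illustrative customization (a sketch, not part of BaseReader): a reader
    # subclass could skip rows that are too short by returning None, since a
    # None return value makes read() skip the row entirely.
    #
    # class SkipShortRowsReader(BaseReader):
    #     def inconsistent_handler(self, str_vals, ncols):
    #         return None if len(str_vals) < ncols else str_vals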
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp."""
if not hasattr(self, "lines"):
raise ValueError(
"Table must be read prior to accessing the header comment lines"
)
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined with the subsequent line.
Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = "\\"
replace_char = " "
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append("".join(parts))
parts = []
return outlines
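# Illustrative result for the docstring example above (a sketch; the joined
# lines keep the whitespace left over from replacing the continuation char):
# >>> ContinuationLinesInputter().process_lines(
# ...     ["col1 col2 col3", "1 \\", "2 3", "4 5 \\", "6"])
# ['col1 col2 col3', '1  2 3', '4 5  6']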
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings."""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (
self.escapechar is None or lastchar != self.escapechar
):
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
extra_reader_pars = (
"Reader",
"Inputter",
"Outputter",
"delimiter",
"comment",
"quotechar",
"header_start",
"data_start",
"data_end",
"converters",
"encoding",
"data_Splitter",
"header_Splitter",
"names",
"include_names",
"exclude_names",
"strict_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs["Inputter"] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if "fast_reader" in kwargs:
if kwargs["fast_reader"]["enable"] == "force":
raise ParameterError(
"fast_reader required with "
"{}, but this is not a fast C reader: {}".format(
kwargs["fast_reader"], Reader
)
)
else:
del kwargs["fast_reader"] # Otherwise ignore fast_reader parameter
reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars}
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if "delimiter" in kwargs:
if kwargs["delimiter"] in ("\n", "\r", "\r\n"):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs["delimiter"]
reader.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
reader.header.comment = kwargs["comment"]
reader.data.comment = kwargs["comment"]
if "quotechar" in kwargs:
reader.header.splitter.quotechar = kwargs["quotechar"]
reader.data.splitter.quotechar = kwargs["quotechar"]
if "data_start" in kwargs:
reader.data.start_line = kwargs["data_start"]
if "data_end" in kwargs:
reader.data.end_line = kwargs["data_end"]
if "header_start" in kwargs:
if reader.header.start_line is not None:
reader.header.start_line = kwargs["header_start"]
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (
("data_start" not in kwargs)
and (default_header_length is not None)
and reader._format_name
not in ["fixed_width_two_line", "commented_header"]
):
reader.data.start_line = (
reader.header.start_line + default_header_length
)
elif kwargs["header_start"] is not None:
# User trying to set a None header start to some value other than None
raise ValueError("header_start cannot be modified for this Reader")
if "converters" in kwargs:
reader.outputter.converters = kwargs["converters"]
if "data_Splitter" in kwargs:
reader.data.splitter = kwargs["data_Splitter"]()
if "header_Splitter" in kwargs:
reader.header.splitter = kwargs["header_Splitter"]()
if "names" in kwargs:
reader.names = kwargs["names"]
if None in reader.names:
raise TypeError("Cannot have None for column name")
if len(set(reader.names)) != len(reader.names):
raise ValueError("Duplicate column names")
if "include_names" in kwargs:
reader.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
reader.exclude_names = kwargs["exclude_names"]
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if "strict_names" in kwargs:
reader.strict_names = kwargs["strict_names"]
if "fill_values" in kwargs:
reader.data.fill_values = kwargs["fill_values"]
if "fill_include_names" in kwargs:
reader.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
reader.data.fill_exclude_names = kwargs["fill_exclude_names"]
if "encoding" in kwargs:
reader.encoding = kwargs["encoding"]
reader.inputter.encoding = kwargs["encoding"]
return reader
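# Illustrative internal usage (a sketch; ui.read()/ui.get_reader() are the
# public entry points and Basic is one example Reader class):
# >>> from astropy.io.ascii.basic import Basic
# >>> reader = _get_reader(Basic, delimiter="|", names=["a", "b"])
# >>> reader.header.splitter.delimiter
# '|'
# >>> reader.names
# ['a', 'b']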
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"strip_whitespace",
"names",
"include_names",
"exclude_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module.
"""
from .fastbasic import FastBasic
    # A value of None for fill_values implies getting the default string
    # representation of masked values (depending on the writer class), but the
    # machinery expects a list. The easiest approach here is to just pop the
    # value off, i.e. fill_values=None is the same as not providing it at all.
if "fill_values" in kwargs and kwargs["fill_values"] is None:
del kwargs["fill_values"]
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f"fast_{Writer._format_name}" in FAST_CLASSES:
# Switch to fast writer
kwargs["fast_writer"] = fast_writer
return FAST_CLASSES[f"fast_{Writer._format_name}"](**kwargs)
writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars}
writer = Writer(**writer_kwargs)
if "delimiter" in kwargs:
writer.header.splitter.delimiter = kwargs["delimiter"]
writer.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
writer.header.write_comment = kwargs["comment"]
writer.data.write_comment = kwargs["comment"]
if "quotechar" in kwargs:
writer.header.splitter.quotechar = kwargs["quotechar"]
writer.data.splitter.quotechar = kwargs["quotechar"]
if "formats" in kwargs:
writer.data.formats = kwargs["formats"]
if "strip_whitespace" in kwargs:
if kwargs["strip_whitespace"]:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller("strip", " \t")
else:
writer.data.splitter.process_val = None
if "names" in kwargs:
writer.header.names = kwargs["names"]
if "include_names" in kwargs:
writer.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
writer.exclude_names = kwargs["exclude_names"]
if "fill_values" in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs["fill_values"][1] + ""
kwargs["fill_values"] = [kwargs["fill_values"]]
writer.data.fill_values = kwargs["fill_values"] + writer.data.fill_values
if "fill_include_names" in kwargs:
writer.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
writer.data.fill_exclude_names = kwargs["fill_exclude_names"]
return writer
|
76021cd5a72477ce1fe2f285f814792623cf81a884ceb468c39df19871e27118 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) format, which allows
reading and writing all the metadata associated with an astropy Table object.
"""
import json
import re
import warnings
from collections import OrderedDict
import numpy as np
from astropy.io.ascii.core import convert_numpy
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
ECSV_VERSION = "1.0"
DELIMITERS = (" ", ",")
ECSV_DATATYPES = (
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"float128",
"string",
) # Raise warning if not one of these standard dtypes
class InvalidEcsvDatatypeWarning(AstropyUserWarning):
"""
ECSV specific Astropy warning class.
"""
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace.
"""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end() :]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with a delimiter separated list of the column names
in order to make this format readable by humans and simple csv-type
readers. It then encodes the full table meta and column attributes and
meta as YAML and pretty-prints this in the header. Finally the
delimited column names are repeated again, for humans and readers that
look for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
# Now assemble the header dict that will be serialized by the YAML dumper
header = {"cols": self.cols, "schema": "astropy-2.0"}
if self.table_meta:
header["meta"] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != " ":
header["delimiter"] = self.splitter.delimiter
header_yaml_lines = [
f"%ECSV {ECSV_VERSION}",
"---",
] + meta.get_yaml_from_header(header)
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
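        # For a simple two-column table the emitted header looks roughly like
        # this (illustrative; the YAML portion comes from get_yaml_from_header):
        #   # %ECSV 1.0
        #   # ---
        #   # datatype:
        #   # - {name: a, datatype: int64}
        #   # - {name: b, datatype: float64}
        #   # schema: astropy-2.0
        #   a b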
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is a ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
no_header_msg = (
'ECSV header line like "# %ECSV <version>" not found as first line.'
" This is required for a ECSV file."
)
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError:
raise core.InconsistentTableError("unable to parse yaml in meta header")
if "meta" in header:
self.table_meta = header["meta"]
if "delimiter" in header:
delimiter = header["delimiter"]
if delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x["name"], x) for x in header["datatype"])
self.names = [x["name"] for x in header["datatype"]]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError(
f"column names from ECSV header {self.names} do not "
f"match names from header line of CSV data {header_names}"
)
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ("description", "format", "unit", "meta", "subtype"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]["datatype"]
# Warn if col dtype is not a valid ECSV datatype, but allow reading for
# back-compatibility with existing older files that have numpy datatypes
# like datetime64 or object or python str, which are not in the ECSV standard.
if col.dtype not in ECSV_DATATYPES:
msg = (
f"unexpected datatype {col.dtype!r} of column {col.name!r} "
f"is not in allowed ECSV datatypes {ECSV_DATATYPES}. "
"Using anyway as a numpy dtype but beware since unexpected "
"results are possible."
)
warnings.warn(msg, category=InvalidEcsvDatatypeWarning)
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and "[" in subtype:
idx = subtype.index("[")
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ("dtype", "subtype"):
if getattr(col, attr) == "string":
setattr(col, attr, "str")
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == "json":
col.subtype = "object"
def _check_dtype_is_str(col):
if col.dtype != "str":
raise ValueError(f'datatype of column {col.name!r} must be "string"')
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == "object":
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, "mask"):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = "[]"
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = data == None
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
                    # np.array(col_vals, dtype=object) can fail here, so use this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, "mask"):
all_none_arr = np.full(
shape=col.shape, fill_value=None, dtype=object
)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = data == None
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {"U": "", "S": b""}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(
f"unexpected subtype {col.subtype!r} set for column "
f"{col.name!r}, using dtype={col.dtype!r} instead.",
category=InvalidEcsvDatatypeWarning,
)
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError(
"shape mismatch between value and column specifier"
)
except json.JSONDecodeError:
raise ValueError(
f"column {col.name!r} failed to convert: "
"column value is not valid JSON"
)
except Exception as exc:
raise ValueError(f"column {col.name!r} failed to convert: {exc}")
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""READ: Set the fill values of the individual cols based on fill_values of BaseData.
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
Normally the super() method will set col.fill_value=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta["__serialized_columns__"]
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (
col.dtype == "str"
and col.name in scs
and scs[col.name]["__class__"] == "astropy.table.column.MaskedColumn"
):
col.fill_values = {} # No data value replacement
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings.
This version considerably simplifies the base method:
- No need to set fill values and column formats
- No per-item formatting, just use repr()
- Use JSON for object-type or multidim values
- Only Column or MaskedColumn can end up as cols here.
- Only replace masked values with "", not the generalized filling
"""
for col in self.cols:
if len(col.shape) > 1 or col.info.dtype.kind == "O":
def format_col_item(idx):
obj = col[idx]
try:
obj = obj.tolist()
except AttributeError:
pass
return json.dumps(obj, separators=(",", ":"))
else:
def format_col_item(idx):
return str(col[idx])
try:
col.str_vals = [format_col_item(idx) for idx in range(len(col))]
except TypeError as exc:
raise TypeError(
f"could not convert column {col.info.name!r} to string: {exc}"
) from exc
# Replace every masked value in a 1-d column with an empty string.
# For multi-dim columns this gets done by JSON via "null".
if hasattr(col, "mask") and col.ndim == 1:
for idx in col.mask.nonzero()[0]:
col.str_vals[idx] = ""
out = [col.str_vals for col in self.cols]
return out
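        # Illustrative encoding (a sketch): a multidim or object value such as
        # [[1, 2], [3, 4]] is written as compact JSON, while a masked element
        # of a 1-d column becomes an empty string.
        # >>> json.dumps([[1, 2], [3, 4]], separators=(",", ":"))
        # '[[1,2],[3,4]]'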
class Ecsv(basic.Basic):
"""ECSV (Enhanced Character Separated Values) format table.
    The ECSV format allows for specification of key table and column meta-data, in
particular the data type and unit.
See: https://github.com/astropy/astropy-APEs/blob/main/APE6.rst
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
"""
_format_name = "ecsv"
_description = "Enhanced CSV"
_io_registry_suffix = ".ecsv"
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
max_ndim = None # No limit on column dimensionality
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as("ecsv"):
out = serialize.represent_mixins_as_columns(table)
return out
|
3f7378dd648a29d0e03a71b4e7cdc48a4fce5d36a8b9e439e9c6948e747699e6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for serializing astropy objects to YAML.
It provides functions `~astropy.io.misc.yaml.dump`,
`~astropy.io.misc.yaml.load`, and `~astropy.io.misc.yaml.load_all` which
call the corresponding functions in `PyYaml <https://pyyaml.org>`_ but use the
`~astropy.io.misc.yaml.AstropyDumper` and `~astropy.io.misc.yaml.AstropyLoader`
classes to define custom YAML tags for the following astropy classes:
- `astropy.units.Unit`
- `astropy.units.Quantity`
- `astropy.time.Time`
- `astropy.time.TimeDelta`
- `astropy.coordinates.SkyCoord`
- `astropy.coordinates.Angle`
- `astropy.coordinates.Latitude`
- `astropy.coordinates.Longitude`
- `astropy.coordinates.EarthLocation`
- `astropy.table.SerializedColumn`
Examples
--------
>>> from astropy.io.misc import yaml
>>> import astropy.units as u
>>> from astropy.time import Time
>>> from astropy.coordinates import EarthLocation
>>> t = Time(2457389.0, format='mjd',
... location=EarthLocation(1000, 2000, 3000, unit=u.km))
>>> td = yaml.dump(t)
>>> print(td)
!astropy.time.Time
format: mjd
in_subfmt: '*'
jd1: 4857390.0
jd2: -0.5
location: !astropy.coordinates.earth.EarthLocation
ellipsoid: WGS84
x: !astropy.units.Quantity
unit: &id001 !astropy.units.Unit {unit: km}
value: 1000.0
y: !astropy.units.Quantity
unit: *id001
value: 2000.0
z: !astropy.units.Quantity
unit: *id001
value: 3000.0
out_subfmt: '*'
precision: 3
scale: utc
>>> ty = yaml.load(td)
>>> ty
<Time object: scale='utc' format='mjd' value=2457389.0>
>>> ty.location # doctest: +FLOAT_CMP
<EarthLocation (1000., 2000., 3000.) km>
"""
import base64
import numpy as np
import yaml
from astropy import coordinates as coords
from astropy import units as u
from astropy.table import SerializedColumn
from astropy.time import Time, TimeDelta
__all__ = ["AstropyLoader", "AstropyDumper", "load", "load_all", "dump"]
def _unit_representer(dumper, obj):
out = {"unit": str(obj.to_string())}
return dumper.represent_mapping("!astropy.units.Unit", out)
def _unit_constructor(loader, node):
map = loader.construct_mapping(node)
return u.Unit(map["unit"], parse_strict="warn")
def _serialized_column_representer(dumper, obj):
out = dumper.represent_mapping("!astropy.table.SerializedColumn", obj)
return out
def _serialized_column_constructor(loader, node):
map = loader.construct_mapping(node)
return SerializedColumn(map)
def _time_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.Time", out)
def _time_constructor(loader, node):
map = loader.construct_mapping(node)
out = Time.info._construct_from_dict(map)
return out
def _timedelta_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.TimeDelta", out)
def _timedelta_constructor(loader, node):
map = loader.construct_mapping(node)
out = TimeDelta.info._construct_from_dict(map)
return out
def _ndarray_representer(dumper, obj):
if not (obj.flags["C_CONTIGUOUS"] or obj.flags["F_CONTIGUOUS"]):
obj = np.ascontiguousarray(obj)
if np.isfortran(obj):
obj = obj.T
order = "F"
else:
order = "C"
data_b64 = base64.b64encode(obj.tobytes())
out = dict(
buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
shape=obj.shape,
order=order,
)
return dumper.represent_mapping("!numpy.ndarray", out)
def _ndarray_constructor(loader, node):
# Convert mapping to a dict useful for initializing ndarray.
# Need deep=True since for structured dtype, the contents
# include lists and tuples, which need recursion via
# construct_sequence.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(**map)
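# Illustrative round-trip for an ndarray through the representer/constructor
# pair above (a sketch; dump and load are defined at the end of this module):
# >>> arr = np.arange(3)
# >>> np.array_equal(load(dump(arr)), arr)
# True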
def _void_representer(dumper, obj):
data_b64 = base64.b64encode(obj.tobytes())
out = dict(
buffer=data_b64,
dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
)
return dumper.represent_mapping("!numpy.void", out)
def _void_constructor(loader, node):
    # Interpret the node as an array scalar and then index to change to void.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(shape=(), **map)[()]
def _quantity_representer(tag):
def representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping(tag, out)
return representer
def _quantity_constructor(cls):
def constructor(loader, node):
map = loader.construct_mapping(node)
return cls.info._construct_from_dict(map)
return constructor
def _skycoord_representer(dumper, obj):
map = obj.info._represent_as_dict()
out = dumper.represent_mapping("!astropy.coordinates.sky_coordinate.SkyCoord", map)
return out
def _skycoord_constructor(loader, node):
map = loader.construct_mapping(node)
out = coords.SkyCoord.info._construct_from_dict(map)
return out
# Straight from yaml's Representer
def _complex_representer(self, data):
if data.imag == 0.0:
data = f"{data.real!r}"
elif data.real == 0.0:
data = f"{data.imag!r}j"
elif data.imag > 0:
data = f"{data.real!r}+{data.imag!r}j"
else:
data = f"{data.real!r}{data.imag!r}j"
return self.represent_scalar("tag:yaml.org,2002:python/complex", data)
def _complex_constructor(loader, node):
map = loader.construct_scalar(node)
return complex(map)
class AstropyLoader(yaml.SafeLoader):
"""
Custom SafeLoader that constructs astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available constructor functions that are
called when parsing a YAML stream. See the `PyYaml documentation
<https://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the
class signature.
"""
def _construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def _construct_python_unicode(self, node):
return self.construct_scalar(node)
class AstropyDumper(yaml.SafeDumper):
"""
Custom SafeDumper that represents astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available representer functions that are
called when generating a YAML stream from an object. See the
`PyYaml documentation <https://pyyaml.org/wiki/PyYAMLDocumentation>`_
for details of the class signature.
"""
def _represent_tuple(self, data):
return self.represent_sequence("tag:yaml.org,2002:python/tuple", data)
AstropyDumper.add_multi_representer(u.UnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.FunctionUnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.StructuredUnit, _unit_representer)
AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple)
AstropyDumper.add_representer(np.ndarray, _ndarray_representer)
AstropyDumper.add_representer(np.void, _void_representer)
AstropyDumper.add_representer(Time, _time_representer)
AstropyDumper.add_representer(TimeDelta, _timedelta_representer)
AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer)
AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer)
# Numpy dtypes
AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool)
for np_type in [
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_int
)
for np_type in [np.float_, np.float16, np.float32, np.float64, np.longdouble]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_float
)
for np_type in [np.complex_, complex, np.complex64, np.complex128]:
AstropyDumper.add_representer(np_type, _complex_representer)
AstropyLoader.add_constructor("tag:yaml.org,2002:python/complex", _complex_constructor)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/tuple", AstropyLoader._construct_python_tuple
)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/unicode", AstropyLoader._construct_python_unicode
)
AstropyLoader.add_constructor("!astropy.units.Unit", _unit_constructor)
AstropyLoader.add_constructor("!numpy.ndarray", _ndarray_constructor)
AstropyLoader.add_constructor("!numpy.void", _void_constructor)
AstropyLoader.add_constructor("!astropy.time.Time", _time_constructor)
AstropyLoader.add_constructor("!astropy.time.TimeDelta", _timedelta_constructor)
AstropyLoader.add_constructor(
"!astropy.coordinates.sky_coordinate.SkyCoord", _skycoord_constructor
)
AstropyLoader.add_constructor(
"!astropy.table.SerializedColumn", _serialized_column_constructor
)
for cls, tag in (
(u.Quantity, "!astropy.units.Quantity"),
(u.Magnitude, "!astropy.units.Magnitude"),
(u.Dex, "!astropy.units.Dex"),
(u.Decibel, "!astropy.units.Decibel"),
(coords.Angle, "!astropy.coordinates.Angle"),
(coords.Latitude, "!astropy.coordinates.Latitude"),
(coords.Longitude, "!astropy.coordinates.Longitude"),
(coords.EarthLocation, "!astropy.coordinates.earth.EarthLocation"),
):
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
for cls in list(coords.representation.REPRESENTATION_CLASSES.values()) + list(
coords.representation.DIFFERENTIAL_CLASSES.values()
):
name = cls.__name__
# Add representations/differentials defined in astropy.
if name in coords.representation.__all__:
tag = "!astropy.coordinates." + name
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader)
def load_all(stream):
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load_all(stream, Loader=AstropyLoader)
def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
"""
kwargs["Dumper"] = AstropyDumper
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream=stream, **kwargs)
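# Illustrative round-trip (a sketch; the intermediate YAML layout is omitted):
# >>> q = [1.0, 2.0] * u.m / u.s
# >>> load(dump(q))
# <Quantity [1., 2.] m / s>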
|
b2d71d3011a93b03973a1aa1084fcfeaae5330838c2d9f92793c33fb72734126 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing Parquet
tables that are not meant to be used directly, but instead are
available as readers/writers in `astropy.table`. See
:ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
from astropy.utils import minversion
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
PARQUET_SIGNATURE = b"PAR1"
__all__ = [] # nothing is publicly scoped
def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith((".parquet", ".parq"))
else:
return False
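# Illustrative behavior (a sketch): identification by filename extension when
# no file object is available ("catalog.*" are hypothetical filenames).
# >>> parquet_identify("read", "catalog.parquet", None)
# True
# >>> parquet_identify("read", "catalog.csv", None)
# False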
def read_table_parquet(
input, include_names=None, exclude_names=None, schema_only=False, filters=None
):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
    forming a more selective, multiple-column predicate. Finally, the
    outermost list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
For example:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, "read"):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode("UTF-8"): v.decode("UTF-8") for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Column, Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if "table_meta_yaml" in md:
meta_yaml = md.pop("table_meta_yaml").split("\n")
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if "meta" in meta_hdr:
meta_dict = meta_hdr["meta"]
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if "__serialized_columns__" in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict["__serialized_columns__"]
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(
dict.fromkeys([x for x in full_table_columns.values() if x in use_names])
)
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict["__serialized_columns__"].keys()):
if scol not in use_names:
meta_dict["__serialized_columns__"].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Determine numpy/astropy types of columns from the arrow table.
dtype = []
for name in names_to_read:
t = schema.field(name).type
shape = None
if isinstance(t, pa.FixedSizeListType):
# The FixedSizeListType has an arrow value_type and a size.
value_type = t.value_type
shape = (t.list_size,)
elif isinstance(t, pa.ListType):
# The ListType (variable length arrays) has a value type.
value_type = t.value_type
else:
# All other arrow column types are the value_type.
value_type = t
if value_type not in (pa.string(), pa.binary()):
            # Convert the pyarrow value type into a numpy dtype (which is returned
            # by the to_pandas_dtype() method).
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(value_type.to_pandas_dtype())
else:
dtype.append((value_type.to_pandas_dtype(), shape))
continue
# Special-case for string and binary columns
md_name = f"table::len::{name}"
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
        elif schema_only:
            # Choose an arbitrary string length since we
            # are not reading in the table.
            strlen = 10
            warnings.warn(
                f"No {md_name} found in metadata. Guessing {strlen} for schema.",
                AstropyUserWarning,
            )
        else:
            # Find the maximum string length by scanning the column.
            strlen = max(len(row.as_py()) for row in pa_table[name])
warnings.warn(
f"No {md_name} found in metadata. Using longest string"
f" ({strlen} characters).",
AstropyUserWarning,
)
strname = f"U{strlen}" if value_type == pa.string() else f"|S{strlen}"
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(strname)
else:
dtype.append((strname, shape))
if schema_only:
# If we only need the schema, create an empty table with the correct dtype.
data = np.zeros(0, dtype=list(zip(names_to_read, dtype)))
table = Table(data=data, meta=meta_dict)
else:
# If we need the full table, create the table and add the columns
# one at a time. This minimizes data copying.
table = Table(meta=meta_dict)
for name, dt in zip(names_to_read, dtype):
# First convert the arrow column to a numpy array.
col = pa_table[name].to_numpy()
t = schema.field(name).type
if t in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the correct type.
col = col.astype(dt)
elif isinstance(t, pa.FixedSizeListType):
# If it is a FixedSizeListType (array column) then it needs to
# be broken into a 2D array, but only if the table has a non-zero
# length.
if len(col) > 0:
col = np.stack(col)
if t.value_type in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the
# correct type.
# The conversion dtype is only the first element
# in the dtype tuple.
col = col.astype(dt[0])
else:
# This is an empty column, and needs to be created with the
# correct type.
col = np.zeros(0, dtype=dt)
elif isinstance(t, pa.ListType):
# If we have a variable length string/binary column,
# we need to convert each row to the proper type.
if t.value_type in (pa.string(), pa.binary()):
col = np.array([row.astype(dt) for row in col], dtype=np.object_)
table.add_column(Column(name=name, data=col))
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = {x["name"]: x for x in meta_hdr["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table
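# Hedged usage sketch (not part of the original module): reading a Parquet table
# through the unified I/O layer, which forwards ``include_names`` and the DNF
# ``filters`` documented above to this reader. The file and column names are
# illustrative only.
def _example_read_parquet():
    from astropy.table import Table
    # Keep two columns and only the rows whose 'flux' value is positive.
    return Table.read(
        "catalog.parquet",
        format="parquet",
        include_names=["ra", "flux"],
        filters=[("flux", ">", 0.0)],
    )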
def write_table_parquet(table, output, overwrite=False):
"""
Write a Table object to a Parquet file.
The parquet writer supports tables with regular columns, fixed-size array
columns, and variable-length array columns (provided all arrays have the
same type).
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
Notes
-----
Tables written with array columns (fixed-size or variable-length) cannot
be read with pandas.
Raises
------
ValueError
If one of the columns has a mixed-type variable-length array, or
if it is a zero-length table and any of the columns are variable-length
arrays.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f"`output` should be a string or path-like, not {output}")
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as("parquet"):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = "\n".join(meta_yaml)
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# If the column type is np.object_, then it should be a column
# of variable-length arrays. This can be serialized with parquet
# provided all of the elements have the same data-type.
# Additionally, if the table has no elements, we cannot deduce
# the datatype, and hence cannot serialize the table.
if len(encode_table) > 0:
obj_dtype = encode_table[name][0].dtype
# Check that the variable-length array all has the same type.
for row in encode_table[name]:
if row.dtype != obj_dtype:
raise ValueError(
f"Cannot serialize mixed-type column ({name}) with parquet."
)
# Calling pa.list_() creates a ListType which is an array of variable-
# length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(obj_dtype.type),
)
else:
raise ValueError(
"Cannot serialize zero-length table "
f"with object column ({name}) with parquet."
)
elif len(dt.shape) > 0:
# This column has a shape, and is an array type column. Calling
# pa.list_() with a list_size creates a FixedSizeListType, which
# is an array of fixed-length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(dt.subdtype[0].type),
list_size=np.prod(dt.shape),
)
else:
# This is a standard column.
arrow_type = pa.from_numpy_dtype(dt.type)
type_list.append((name, arrow_type))
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
t = col.dtype.type
itemsize = col.dtype.itemsize
if t is np.object_:
t = encode_table[name][0].dtype.type
if t == np.str_ or t == np.bytes_:
                # Scan every row, since object columns have no fixed itemsize.
itemsize = -1
for row in encode_table[name]:
itemsize = max(itemsize, row.dtype.itemsize)
if t is np.str_:
metadata[f"table::len::{name}"] = str(itemsize // 4)
elif t is np.bytes_:
metadata[f"table::len::{name}"] = str(itemsize)
metadata["table_meta_yaml"] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {
k.encode("UTF-8"): v.encode("UTF-8") for k, v in metadata.items()
}
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
    # Use Parquet format version 2 ('2.4' with pyarrow >= 6.0, '2.0' otherwise)
    # for full support of datatypes including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# Turn the column into a list of numpy arrays.
val = [row for row in encode_table[name]]
elif len(dt.shape) > 0:
if len(encode_table) > 0:
val = np.split(encode_table[name].ravel(), len(encode_table))
else:
val = []
else:
val = encode_table[name]
arrays.append(pa.array(val, type=schema.field(name).type))
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table)
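# Hedged usage sketch (not part of the original module): writing a table that
# exercises the scalar, fixed-size array, and variable-length array branches
# above. Column names and the output file name are illustrative only.
def _example_write_parquet():
    from astropy.table import Table
    t = Table()
    t["scalar"] = [1, 2, 3]
    # Fixed-size array column -> FixedSizeListType in the Parquet schema.
    t["fixed"] = np.arange(6).reshape(3, 2)
    # Variable-length array column (object dtype, uniform float64) -> ListType.
    varlen = np.empty(3, dtype=object)
    varlen[:] = [np.array([1.0]), np.array([2.0, 3.0]), np.array([4.0, 5.0, 6.0])]
    t["varlen"] = varlen
    t.write("example.parquet", format="parquet", overwrite=True)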
def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == "name":
all_names.append(v)
return all_names
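# Hedged illustration (schematic, not the exact astropy metadata layout): given a
# nested ``__serialized_columns__`` entry such as
#     {"time": {"name": "time",
#               "jd1": {"name": "time.jd1"},
#               "jd2": {"name": "time.jd2"}}}
# ``_get_names`` collects every ``name`` value it finds and returns
# ["time", "time.jd1", "time.jd2"].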
def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("parquet", Table, read_table_parquet)
io_registry.register_writer("parquet", Table, write_table_parquet)
io_registry.register_identifier("parquet", Table, parquet_identify)
def get_pyarrow():
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
raise Exception("pyarrow is required to read and write parquet files")
if minversion(pa, "6.0.0"):
writer_version = "2.4"
else:
writer_version = "2.0"
return pa, parquet, writer_version
|
7a866dce406be24526dd445436f465ec2732f438fb19d6bf3e6e29b370f4e6e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import base64
import codecs
import gzip
import io
import re
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy import __version__ as astropy_version
from astropy.io import fits
from astropy.utils.collections import HomogeneousList
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.xml.writer import XMLWriter
from . import converters
from . import ucd as ucd_mod
from . import util, xmlutil
from .exceptions import (
E06,
E08,
E09,
E10,
E11,
E12,
E13,
E15,
E16,
E17,
E18,
E19,
E20,
E21,
E22,
E23,
E25,
W06,
W07,
W08,
W09,
W10,
W11,
W12,
W13,
W15,
W17,
W18,
W19,
W20,
W21,
W22,
W26,
W27,
W28,
W29,
W32,
W33,
W35,
W36,
W37,
W38,
W40,
W41,
W42,
W43,
W44,
W45,
W50,
W52,
W53,
W54,
vo_raise,
vo_reraise,
vo_warn,
warn_or_raise,
warn_unknown_attrs,
)
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
"Link",
"Info",
"Values",
"Field",
"Param",
"CooSys",
"TimeSys",
"FieldRef",
"ParamRef",
"Group",
"Table",
"Resource",
"VOTableFile",
"Element",
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
    Masked arrays cannot be resized in place, and `np.resize` and
    `ma.resize` are both incompatible with structured arrays.
    Therefore, we allocate a new masked array of the requested size and
    copy over as many rows as fit.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
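# Hedged usage sketch (not part of the original module): growing a masked
# structured array with ``_resize``; the dtype and values are illustrative only.
def _example_resize():
    row_type = np.dtype([("x", "f8"), ("flag", "?")])
    arr = ma.zeros((2,), dtype=row_type)
    arr["x"] = [1.0, 2.0]
    # Five rows total; the first two are copied from ``arr``, the rest stay zero.
    return _resize(arr, 5)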
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
        Should be `True` if the attribute is unique and therefore this
        should return only one value. Otherwise, a generator yielding
        all matching elements is returned.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name
)
)
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
        Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name
)
)
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config["version_1_4_or_later"]:
return "vounit"
else:
return "cds"
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get("unit_format") is None:
format = _get_default_unit_format(config)
else:
format = config["unit_format"]
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if year is not None and re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None:
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
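# Hedged illustration: the pattern above accepts year strings such as "J2000",
# "B1950.0" and "2015.5", while a value like "2000 AD" triggers W07 and the
# function returns False (or raises, depending on the verify setting).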
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
    string : str
        The string to check
    attr_name : str
        The name of the attribute the string was found in (used for the
        error message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get("version_1_1_or_later"):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get("version_1_2_or_later", False),
has_colon=config.get("version_1_2_or_later", False),
)
except ValueError as e:
            # Emit W06 as an exception, a warning, or nothing, depending on
            # the 'verify' configuration setting.
if config.get("verify", "ignore") == "exception":
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get("verify", "ignore") == "warn":
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28, W28, ("xtype", self._element_name, "1.2"), self._config, self._pos
)
check_string(xtype, "xtype", self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (
self._utype_in_v1_2
and utype is not None
and not self._config.get("version_1_2_or_later")
):
warn_or_raise(
W28, W28, ("utype", self._element_name, "1.2"), self._config, self._pos
)
check_string(utype, "utype", self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == "":
ucd = None
if ucd is not None:
if self._ucd_in_v1_2 and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28,
W28,
("ucd", self._element_name, "1.2"),
self._config,
self._pos,
)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ""
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get("version_1_1_or_later"):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(
self._element_name,
self._content,
attrib=w.object_attrs(self, self._attr_list),
)
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = [
"ID",
"content_role",
"content_type",
"title",
"value",
"href",
"action",
]
_element_name = "LINK"
def __init__(
self,
ID=None,
title=None,
value=None,
href=None,
action=None,
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get("content-role") or kwargs.get("content_role")
content_type = kwargs.get("content-type") or kwargs.get("content_type")
if "gref" in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
"LINK",
kwargs.keys(),
config,
pos,
["content-role", "content_role", "content-type", "content_type", "gref"],
)
@property
def content_role(self):
"""Defines the MIME role of the referenced object.
Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if (
content_role == "type" and not self._config["version_1_3_or_later"]
        ) or content_role not in (None, "query", "hints", "doc", "location", "type"):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault("links", [])
column.meta["links"].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty, _UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = "INFO"
_attr_list_11 = ["ID", "name", "value"]
_attr_list_12 = _attr_list_11 + ["xtype", "ref", "unit", "ucd", "utype"]
_utype_in_v1_2 = True
def __init__(
self,
ID=None,
name=None,
value=None,
id=None,
xtype=None,
ref=None,
unit=None,
ucd=None,
utype=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs("INFO", ["xtype"], config, pos)
if ref is not None:
warn_unknown_attrs("INFO", ["ref"], config, pos)
if unit is not None:
warn_unknown_attrs("INFO", ["unit"], config, pos)
if ucd is not None:
warn_unknown_attrs("INFO", ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs("INFO", ["utype"], config, pos)
warn_unknown_attrs("INFO", extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, "name", self._config, self._pos)
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, "value", self._config, self._pos)
check_string(value, "value", self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("ref", "INFO", "1.2"), self._config, self._pos)
xmlutil.check_id(ref, "ref", self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("unit", "INFO", "1.2"), self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
w.element(self._element_name, self._content, attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
votable,
field,
ID=None,
null=None,
ref=None,
type="legal",
id=None,
config=None,
pos=None,
**extras,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs("VALUES", extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos
)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
"0", self._config, self._pos
)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""Defines the applicability of the domain defined by this VALUES_ element [*required*].
Must be one of the following strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("legal", "actual"):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("VALUES", self.ref), self._config, self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, "converter") and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == "yes":
self._min_inclusive = True
elif inclusive == "no":
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, "converter") and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == "yes":
self._max_inclusive = True
elif inclusive == "no":
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != "VALUES":
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == "MIN":
if "value" not in data:
vo_raise(E09, "MIN", config, pos)
self.min = data["value"]
self.min_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MIN", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "MAX":
if "value" not in data:
vo_raise(E09, "MAX", config, pos)
self.max = data["value"]
self.max_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MAX", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "OPTION":
if "value" not in data:
vo_raise(E09, "OPTION", config, pos)
xmlutil.check_token(data.get("name"), "name", config, pos)
self.options.append((data.get("name"), data.get("value")))
warn_unknown_attrs(
"OPTION", data.keys(), config, pos, ["value", "name"]
)
elif tag == "VALUES":
break
return self
def is_defaults(self):
"""
        Are the settings on this ``VALUES`` element all the same as the
        XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (
self.ref is None
and self.null is None
and self.ID is None
and self.max is None
and self.min is None
and self.options == []
)
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return "yes"
return "no"
if self.is_defaults():
return
if self.ref is not None:
w.element("VALUES", attrib=w.object_attrs(self, ["ref"]))
else:
with w.tag("VALUES", attrib=w.object_attrs(self, ["ID", "null", "ref"])):
if self.min is not None:
w.element(
"MIN",
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive),
)
if self.max is not None:
w.element(
"MAX",
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive),
)
for name, value in self.options:
w.element("OPTION", name=name, value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ["ID", "null"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta["min"] = {"value": self.min, "inclusive": self.min_inclusive}
if self.max is not None:
meta["max"] = {"value": self.max, "inclusive": self.max_inclusive}
if len(self.options):
meta["options"] = dict(self.options)
column.meta["values"] = meta
def from_table_column(self, column):
if column.info.meta is None or "values" not in column.info.meta:
return
meta = column.info.meta["values"]
for key in ["ID", "null"]:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if "min" in meta:
self.min = meta["min"]["value"]
self.min_inclusive = meta["min"]["inclusive"]
if "max" in meta:
self.max = meta["max"]["value"]
self.max_inclusive = meta["max"]["inclusive"]
if "options" in meta:
self._options = list(meta["options"].items())
class Field(
SimpleElement,
_IDProperty,
_NameProperty,
_XtypeProperty,
_UtypeProperty,
_UcdProperty,
):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = [
"ID",
"name",
"datatype",
"arraysize",
"ucd",
"unit",
"width",
"precision",
"utype",
"ref",
]
_attr_list_12 = _attr_list_11 + ["xtype"]
_element_name = "FIELD"
def __init__(
self,
votable,
ID=None,
name=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
ref=None,
type=None,
id=None,
xtype=None,
config=None,
pos=None,
**extra,
):
if config is None:
if hasattr(votable, "_get_version_checks"):
config = votable._get_version_checks()
else:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ["xtype"], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (
config.get("verify", "ignore") != "exception"
and name == "cprojection"
and ID == "cprojection"
and ucd == "VOX:WCS_CoordProjection"
and datatype == "double"
):
datatype = "char"
arraysize = "3"
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
if name is None:
if self._element_name == "PARAM" and not config.get("version_1_1_or_later"):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
"string": "char",
"unicodeString": "unicodeChar",
"int16": "short",
"int32": "int",
"int64": "long",
"float32": "float",
"float64": "double",
# The following appear in some Vizier tables
"unsignedInt": "long",
"unsignedShort": "int",
}
datatype_mapping.update(config.get("datatype_mapping", {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]), config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if not implicit and new_name != field.name:
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""The datatype of the column [*required*].
Valid values (as defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get("version_1_1_or_later"):
warn_or_raise(E10, E10, self._element_name, self._config, self._pos)
datatype = "char"
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if arraysize is not None and not re.match(
r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize
):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == "VALUES":
self.values.__init__(
self._votable, self, config=config, pos=pos, **data
)
self.values.parse(iterator, config)
elif tag == "LINK":
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == "DESCRIPTION":
warn_unknown_attrs("DESCRIPTION", data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (
isinstance(self.converter, converters.FloatingPoint)
and self.converter.output_format != "{!r:>}"
):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta["_votable_string_dtype"] = "char"
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta["_votable_string_dtype"] = "unicodeChar"
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs["unit"] = column.info.unit
kwargs["name"] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and "links" in meta:
for link in meta["links"]:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
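# Hedged usage sketch (not part of the original module): building FIELDs
# programmatically. ``VOTableFile``, ``Resource`` and ``Table`` are the tree
# classes defined later in this module; the field names follow the astropy
# documentation example.
def _example_build_fields():
    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    table.fields.extend([
        Field(votable, name="filename", datatype="char", arraysize="1"),
        Field(votable, name="matrix", datatype="double", arraysize="2x2"),
    ])
    table.create_arrays(2)  # allocate two empty rows to be filled in later
    return votable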
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ["value"]
_attr_list_12 = Field._attr_list_12 + ["value"]
_element_name = "PARAM"
def __init__(
self,
votable,
ID=None,
name=None,
value=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
type=None,
id=None,
config=None,
pos=None,
**extra,
):
self._value = value
Field.__init__(
self,
votable,
ID=ID,
name=name,
datatype=datatype,
arraysize=arraysize,
ucd=ucd,
            unit=unit,
            width=width,
            precision=precision,
utype=utype,
type=type,
id=id,
config=config,
pos=pos,
**extra,
)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ["ID", "equinox", "epoch", "system"]
_element_name = "COOSYS"
def __init__(
self,
ID=None,
equinox=None,
epoch=None,
system=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# COOSYS was deprecated in 1.2 but then re-instated in 1.3
if config.get("version_1_2_or_later") and not config.get(
"version_1_3_or_later"
):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs("COOSYS", extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get("version_1_1_or_later"):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def system(self):
"""Specifies the type of coordinate system.
Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in (
"eq_FK4",
"eq_FK5",
"ICRS",
"ecl_FK4",
"ecl_FK5",
"galactic",
"supergalactic",
"xy",
"barycentric",
"geo_app",
):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
(as e.g. "J2000" as the default "eq_FK5" or "B1950" as the
default "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, "equinox", self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, "epoch", self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
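# Hedged illustration: a typical COOSYS element for FK5/J2000 coordinates is
#     <COOSYS ID="J2000" equinox="J2000" epoch="J2000" system="eq_FK5"/>
# which corresponds to
#     CooSys(ID="J2000", equinox="J2000", epoch="J2000", system="eq_FK5")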
class TimeSys(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ["ID", "timeorigin", "timescale", "refposition"]
_element_name = "TIMESYS"
def __init__(
self,
ID=None,
timeorigin=None,
timescale=None,
refposition=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config["version_1_4_or_later"]:
warn_or_raise(W54, W54, config["version"], config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs(
"TIMESYS",
extra.keys(),
config,
pos,
["ID", "timeorigin", "timescale", "refposition"],
)
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
        given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (
timeorigin is not None
and timeorigin != "MJD-origin"
and timeorigin != "JD-origin"
):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(
self, table, ref, ucd=None, utype=None, config=None, pos=None, **extra
):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Lookup the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(f"No field named '{self.ref}'", self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
        Lookup the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(f"No params named '{self.ref}'", self._config, self._pos, KeyError)
class Group(
Element,
_IDProperty,
_NameProperty,
_UtypeProperty,
_UcdProperty,
_DescriptionProperty,
):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
    This information is currently ignored by the vo package---that is,
    the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
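    For example, to collect every PARAM_ contained (recursively) in a
    group (a sketch; ``group`` is assumed to be a parsed `Group`)::

        params = list(group.iter_fields_and_params())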
"""
def __init__(
self,
table,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList((FieldRef, ParamRef, Group, Param))
warn_unknown_attrs("GROUP", extra.keys(), config, pos)
def __repr__(self):
return f"<GROUP>... {len(self._entries)} entries ...</GROUP>"
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
"FIELDref": self._add_fieldref,
"PARAMref": self._add_paramref,
"PARAM": self._add_param,
"GROUP": self._add_group,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "GROUP", config, pos)
self.description = data or None
elif tag == "GROUP":
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
"GROUP", attrib=w.object_attrs(self, ["ID", "name", "ref", "ucd", "utype"])
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
yield from entry.iter_fields_and_params()
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
yield from entry.iter_groups()
class Table(Element, _IDProperty, _NameProperty, _UcdProperty, _DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
    If the Table contains no data (for example, its enclosing
    :class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
    will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
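    A short usage sketch (it assumes ``votable.xml`` is an existing,
    valid VOTable file and uses the top-level ``parse`` reader from
    `astropy.io.votable`)::

        from astropy.io.votable import parse

        tab = parse("votable.xml").get_first_table()
        print(tab.array)                            # masked data array
        print([field.ID for field in tab.fields])   # column identifiers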
"""
def __init__(
self,
votable,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
nrows=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
xmlutil.check_id(ref, "ref", config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = "tabledata"
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs("TABLE", extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("TABLE", self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""The serialization format of the table [*required*].
Must be one of:
        'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_),
        or 'fits' (FITS_).
        Note that the 'fits' format, since it requires an external
        file, cannot be written out. Any file read in with 'fits'
        format will be written out, by default, in 'tabledata' format.
See :ref:`astropy:votable-serialization`.
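        For example, to request the BINARY2 serialization before writing
        (a sketch; ``tab`` is assumed to be a `Table` in a VOTable 1.3 or
        later file)::

            tab.format = "binary2"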
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == "fits":
vo_raise(
"fits format can not be written out, only read.",
self._config,
self._pos,
NotImplementedError,
)
if format == "binary2":
if not self._config["version_1_3_or_later"]:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config,
self._pos,
)
elif format not in ("tabledata", "binary"):
vo_raise(f"Invalid format '{format}'", self._config, self._pos)
self._format = format
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
        Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype="O")
mask = np.zeros((nrows,), dtype="b")
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
new_type = (d[1][1] == "O" and "O") or "bool"
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get("columns")
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get("table_number")
current_table_number = config.get("_current_table_number")
skip_table = False
if current_table_number is not None:
config["_current_table_number"] += 1
if table_number is not None and table_number != current_table_number:
skip_table = True
self._empty = True
table_id = config.get("table_id")
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
else:
if tag == "TABLE":
return self
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
else:
tag_mapping = {
"FIELD": self._add_field,
"PARAM": self._add_param,
"GROUP": self._add_group,
"LINK": self._add_link,
"INFO": self._add_info,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
if len(self.fields) == 0:
warn_or_raise(E25, E25, None, config, pos)
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "TABLE":
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
if np.any(columns < 0) or np.any(columns > len(fields)):
raise ValueError("Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(f"Columns '{columns}' not found in fields list")
else:
raise TypeError("Invalid columns list")
if (not skip_table) and (len(fields) > 0):
for start, tag, data, pos in iterator:
if start:
if tag == "TABLEDATA":
warn_unknown_attrs("TABLEDATA", data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config
)
break
elif tag == "BINARY":
warn_unknown_attrs("BINARY", data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos
)
break
elif tag == "BINARY2":
if not config["version_1_3_or_later"]:
warn_or_raise(W52, W52, config["version"], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos
)
break
elif tag == "FITS":
warn_unknown_attrs("FITS", data.keys(), config, pos, ["extnum"])
try:
extnum = int(data.get("extnum", 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == "DATA":
break
for start, tag, data, pos in iterator:
if start and tag == "INFO":
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == "TABLE":
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get("invalid", "exception")
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get("chunk_size", DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == "TR":
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = data.get("encoding", None) == "base64"
warn_unknown_attrs(tag, data.keys(), config, pos, ["encoding"])
else:
if tag == "TD":
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(data.encode("ascii"))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](buf.read)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
else:
try:
value, mask_value = parsers[i](
data, config, pos
)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
except Exception as e:
if invalid == "exception":
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == "TR":
break
else:
self._add_unknown_tag(iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows : numrows + chunk_size] = array_chunk
array.mask[numrows : numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == "TABLEDATA":
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if self.nrows is not None and self.nrows >= 0 and self.nrows != numrows:
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
if "href" not in data:
have_local_stream = True
if data.get("encoding", None) != "base64":
warn_or_raise(
W38, W38, data.get("encoding", None), config, pos
)
else:
href = data["href"]
xmlutil.check_anyuri(href, config, pos)
encoding = data.get("encoding", None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode("ascii"))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, "
+ "ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "rb", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(
converters.bitarray_to_bool(mask_bits, len(fields))
)
# Ignore the mask for string columns (see issue 8995)
for i, f in enumerate(fields):
if row_mask_data[i] and (
f.datatype == "char" or f.datatype == "unicodeChar"
):
row_mask_data[i] = False
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e,
config,
pos,
f"(in row {numrows:d}, col '{fields[i].ID}')",
)
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
href = data["href"]
encoding = data.get("encoding", None)
else:
break
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "r", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get("tabledata_format")
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == "fits":
format = "tabledata"
with w.tag(
"TABLE",
attrib=w.object_attrs(self, ("ID", "name", "ref", "ucd", "utype", "nrows")),
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups, self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs["version_1_2_or_later"]:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID=f"_g{index}")
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag("DATA"):
if format == "tabledata":
self._write_tabledata(w, **kwargs)
elif format == "binary":
self._write_binary(1, w, **kwargs)
elif format == "binary2":
self._write_binary(2, w, **kwargs)
if kwargs["version_1_2_or_later"]:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag("TABLEDATA"):
w._flush()
if _has_c_tabledata_writer and not kwargs.get("_debug_python_based_parser"):
supports_empty_values = [
field.converter.supports_empty_values(kwargs) for field in fields
]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write,
array.data,
array.mask,
fields,
supports_empty_values,
indent,
1 << 8,
)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [
(
i,
field.converter.output,
field.converter.supports_empty_values(kwargs),
)
for i, field in enumerate(fields)
]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID
),
)
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = "BINARY"
else:
tag_name = "BINARY2"
with w.tag(tag_name):
with w.tag("STREAM", encoding="base64"):
fields_basic = [
(i, field.converter.binoutput) for (i, field) in enumerate(fields)
]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
assert type(chunk) == bytes
except Exception as e:
vo_reraise(
e, additional=f"(in row {row:d}, col '{fields[i].ID}')"
)
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode("ascii"))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
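        A short usage sketch (``tab`` is assumed to be a parsed `Table`
        instance)::

            at = tab.to_table(use_names_over_ids=True)
            # Any astropy.table writer can then be used on the result.
            at.write("catalog.ecsv", format="ascii.ecsv")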
"""
from astropy.table import Table
meta = {}
for key in ["ID", "name", "ref", "ucd", "utype", "description"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
for i, name in enumerate(names):
new_name = name
i = 2
while new_name in unique_names:
new_name = f"{name}{i}"
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
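        For example (a sketch; ``votable`` is assumed to be the enclosing
        `VOTableFile` instance)::

            import astropy.table

            at = astropy.table.Table(rows=[(1, 2.0)], names=("id", "flux"))
            vo_tab = Table.from_table(votable, at)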
"""
kwargs = {}
for key in ["ID", "name", "ref", "ucd", "utype"]:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if "description" in table.meta:
new_table.description = table.meta["description"]
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table), mask=np.asarray(table.mask))
return new_table
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
yield from self.params
yield from self.fields
for group in self.groups:
yield from group.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID or name.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
yield from group.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_info(self):
yield from self.infos
class Resource(
Element, _IDProperty, _NameProperty, _UtypeProperty, _DescriptionProperty
):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
name=None,
ID=None,
utype=None,
type="results",
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs("RESOURCE", kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""The type of the resource [*required*].
Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("results", "meta"):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""Dictionary of extra attributes of the RESOURCE_ element.
        This is a dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. The specification explicitly allows
for extra attributes here, but nowhere else.
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system definitions (TIMESYS_ elements) for
the RESOURCE_. Must contain only `TimeSys` objects.
"""
return self._time_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
A list of groups.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
"TABLE": self._add_table,
"INFO": self._add_info,
"PARAM": self._add_param,
"GROUP": self._add_group,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"RESOURCE": self._add_resource,
"LINK": self._add_link,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "RESOURCE":
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ("ID", "type", "utype"))
attrs.update(self.extra_attributes)
with w.tag("RESOURCE", attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.links,
self.tables,
self.resources,
):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
yield from self.tables
for resource in self.resources:
yield from resource.iter_tables()
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
yield from self.params
for table in self.tables:
yield from table.iter_fields_and_params()
for resource in self.resources:
yield from resource.iter_fields_and_params()
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
def iter_timesys(self):
"""
Recursively iterates over all the TIMESYS_ elements in the
resource and nested resources.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
yield from self.infos
for table in self.tables:
yield from table.iter_info()
for resource in self.resources:
yield from resource.iter_info()
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
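    A condensed sketch of building a file from scratch (the same pattern
    used in the astropy documentation; it assumes this module is
    importable as ``astropy.io.votable.tree``)::

        from astropy.io.votable.tree import Field, Resource, Table, VOTableFile

        votable = VOTableFile()
        resource = Resource()
        votable.resources.append(resource)
        table = Table(votable)
        resource.tables.append(table)
        table.fields.extend([
            Field(votable, name="filename", datatype="char", arraysize="*"),
            Field(votable, name="matrix", datatype="double", arraysize="2x2"),
        ])
        table.create_arrays(2)            # allocate two rows
        table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
        table.array[1] = ("test2.xml", [[0.5, 0.3], [-0.3, 0.5]])
        votable.to_xml("new_votable.xml")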
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.4"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version == "1.0":
warnings.warn(
"VOTable 1.0 support is deprecated in astropy 4.3 and will be "
"removed in a future release",
AstropyDeprecationWarning,
)
elif (version != "1.0") and (version not in self._version_namespace_map):
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(f"'version' should be in ('1.0', '{allowed_from_map}').")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return f"<VOTABLE>... {n_tables} tables ...</VOTABLE>"
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(
"astropy.io.votable only supports VOTable versions"
f" '{allowed_from_map}'"
)
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system descriptions for the file. Must
contain only `TimeSys` objects.
"""
return self._time_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("GROUP", "VOTABLE", "1.2"), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _get_version_checks(self):
config = {}
config["version_1_1_or_later"] = util.version_compare(self.version, "1.1") >= 0
config["version_1_2_or_later"] = util.version_compare(self.version, "1.2") >= 0
config["version_1_3_or_later"] = util.version_compare(self.version, "1.3") >= 0
config["version_1_4_or_later"] = util.version_compare(self.version, "1.4") >= 0
return config
# Map VOTable version numbers to namespace URIs and schema information.
_version_namespace_map = {
# Version 1.0 isn't well-supported, but is allowed on parse (with a warning).
# It used DTD rather than schema, so this information would not be useful.
# By omitting 1.0 from this dict we can use the keys as the list of versions
# that are allowed in various other checks.
"1.1": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.1",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.1",
},
"1.2": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.2",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.2",
},
# With 1.3 we'll be more explicit with the schema location.
# - xsi:schemaLocation uses the namespace name along with the URL
# to reference it.
# - For convenience, but somewhat confusingly, the namespace URIs
# are also usable URLs for accessing an applicable schema.
# However to avoid confusion, we'll use the explicit schema URL.
"1.3": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"
),
},
# With 1.4 namespace URIs stopped incrementing with minor version changes
# so we use the same URI as with 1.3. See this IVOA note for more info:
# http://www.ivoa.net/documents/Notes/XMLVers/20180529/
"1.4": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"
),
},
}
def parse(self, iterator, config):
config["_current_table_number"] = 0
for start, tag, data, pos in iterator:
if start:
if tag == "xml":
pass
elif tag == "VOTABLE":
if "version" not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config["version"] = self.version
else:
config["version"] = self._version = data["version"]
if config["version"].lower().startswith("v"):
warn_or_raise(W29, W29, config["version"], config, pos)
self._version = config["version"] = config["version"][1:]
if config["version"] not in self._version_namespace_map:
vo_warn(W21, config["version"], config, pos)
if "xmlns" in data:
ns_info = self._version_namespace_map.get(config["version"], {})
correct_ns = ns_info.get("namespace_uri")
if data["xmlns"] != correct_ns:
vo_warn(W41, (correct_ns, data["xmlns"]), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config.update(self._get_version_checks())
tag_mapping = {
"PARAM": self._add_param,
"RESOURCE": self._add_resource,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"INFO": self._add_info,
"DEFINITIONS": self._add_definitions,
"DESCRIPTION": self._ignore_add,
"GROUP": self._add_group,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "VOTABLE", config, pos)
self.description = data or None
if not len(self.resources) and config["version_1_2_or_later"]:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(
self,
fd,
compressed=False,
tabledata_format=None,
_debug_python_based_parser=False,
_astropy_version=None,
):
"""
Write to an XML file.
Parameters
----------
fd : str or file-like
Where to write the file. If a file-like object, must be writable.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
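        For example, to write a gzip-compressed file while forcing the
        TABLEDATA serialization (a sketch; ``votable`` is assumed to be a
        `VOTableFile` instance)::

            votable.to_xml("catalog.xml.gz", compressed=True,
                           tabledata_format="tabledata")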
"""
if tabledata_format is not None:
if tabledata_format.lower() not in ("tabledata", "binary", "binary2"):
raise ValueError(f"Unknown format type '{format}'")
kwargs = {
"version": self.version,
"tabledata_format": tabledata_format,
"_debug_python_based_parser": _debug_python_based_parser,
"_group_number": 1,
}
kwargs.update(self._get_version_checks())
with util.convert_to_writable_filelike(fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
# Build the VOTABLE tag attributes.
votable_attr = {
"version": version,
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
ns_info = self._version_namespace_map.get(version, {})
namespace_uri = ns_info.get("namespace_uri")
if namespace_uri:
votable_attr["xmlns"] = namespace_uri
schema_location_attr = ns_info.get("schema_location_attr")
schema_location_value = ns_info.get("schema_location_value")
if schema_location_attr and schema_location_value:
votable_attr[schema_location_attr] = schema_location_value
with w.tag("VOTABLE", votable_attr):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.resources,
]
if kwargs["version_1_2_or_later"]:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
yield from resource.iter_tables()
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""",
)
get_tables_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""",
)
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(f"No table at index {idx:d} found in VOTABLE file.")
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
yield from resource.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_ or name.
""",
)
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_values",
"VALUES",
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
yield from table.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
get_coosys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_coosys",
"COOSYS",
"""Looks up a COOSYS_ element by the given ID.""",
)
def iter_timesys(self):
"""
Recursively iterate over all TIMESYS_ elements in the VOTABLE_
file.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
get_timesys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_timesys",
"TIMESYS",
"""Looks up a TIMESYS_ element by the given ID.""",
)
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
yield from self.infos
for resource in self.resources:
yield from resource.iter_info()
get_info_by_id = _lookup_by_attr_factory(
"ID", True, "iter_info", "INFO", """Looks up a INFO element by the given ID."""
)
get_infos_by_name = _lookup_by_attr_factory(
"name",
False,
"iter_info",
"INFO",
"""Returns all INFO children with the given name.""",
)
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table_id : str, optional
Set the given ID attribute on the returned Table instance.
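        For example, to convert an `astropy.table.Table` and write it out
        (a sketch)::

            from astropy.table import Table as APTable

            at = APTable(rows=[(1.0, 2.0)], names=("ra", "dec"))
            vot = VOTableFile.from_table(at, table_id="results")
            vot.to_xml("results.xml")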
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
|
251a4ef94f3a492475c38309b721ce5e6493c81273227d103cf21b7953f986c0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""VOTable exceptions and warnings.
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
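To ignore one particular warning class for a whole session, or to raise
the limit on repeated warnings, something like the following sketch can
be used (``W01`` is one of the warning classes documented below)::

    import warnings

    from astropy.io.votable import exceptions

    # Silence one specific warning class entirely.
    warnings.simplefilter("ignore", category=exceptions.W01)

    # Show up to 50 repetitions of the same warning type before
    # suppressing further occurrences.
    exceptions.conf.max_warnings = 50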
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"Conf",
"conf",
"warn_or_raise",
"vo_raise",
"vo_reraise",
"vo_warn",
"warn_unknown_attrs",
"parse_vowarning",
"VOWarning",
"VOTableChangeWarning",
"VOTableSpecWarning",
"UnimplementedWarning",
"IOWarning",
"VOTableSpecError",
]
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
"Number of times the same type of warning is displayed before being suppressed",
cfgtype="integer",
)
conf = Conf()
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ("?", "?")
filename = config.get("filename", "?")
return f"{filename}:{pos[0]}:{pos[1]}: {name}: {message}"
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault("_warning_counts", dict()).setdefault(warning_class, 0)
config["_warning_counts"][warning_class] += 1
message_count = config["_warning_counts"][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += (
" (suppressing further warnings of this type...)"
)
warn(warning, stacklevel=stacklevel + 1)
def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel + 1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): "
+ r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"
)
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result
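# A minimal illustrative sketch (not part of the original module): round-tripping
# a formatted message through parse_vowarning.  The input line mimics the output
# of _format_message for a W01 warning in a hypothetical "foo.xml".
def _example_parse_vowarning():
    parts = parse_vowarning(
        "foo.xml:12:0: W01: Array uses commas rather than whitespace"
    )
    # parts["is_warning"] -> True, parts["number"] -> 1,
    # parts["nline"] -> 12, parts["message"] -> the human-readable text
    return parts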
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ""
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args,)
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos
)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
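# A minimal illustrative sketch (not part of the original module): how
# message_template, the positional args and the position information combine
# into the final text of a VOWarning subclass.  W02 is defined further below,
# so the name is resolved only when this sketch is called; "foo.xml" is a
# hypothetical filename.
def _example_formatted_message():
    w = W02(("ID", "1bad"), config={"filename": "foo.xml"}, pos=(3, 4))
    # str(w) -> "foo.xml:3:4: W02: ID attribute '1bad' is invalid. Must be a standard XML id"
    return str(w)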
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""Array uses commas rather than whitespace.
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""Nonstandard XML id.
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <https://www.w3.org/TR/xml-names/>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ("x", "y")
class W03(VOTableChangeWarning):
"""Implicitly generating an ID from a name.
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
When ``ID`` is not present (since it is not required by
the specification), ``name`` is used instead; however, ``name``
must first be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ("x", "y")
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ("x",)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ("x",)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ("x", "explanation")
class W07(VOTableSpecWarning):
"""Invalid astroYear.
An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ("x", "y")
class W08(VOTableSpecWarning):
"""
To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ("x",)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__). If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ("x",)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes"
)
default_args = ("x",)
class W13(VOTableSpecWarning):
"""Invalid VOTable datatype.
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ("x", "y")
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``,
``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ("x",)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ("x",)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = "TABLE specified nrows={}, but table contains {} rows"
default_args = ("x", "y")
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
"The fields defined in the VOTable do not match those in the "
+ "embedded FITS file"
)
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = "No version number specified in file. Assuming {}"
default_args = ("1.1",)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
"astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,"
" and 1.4, but this file is {}"
)
default_args = ("x",)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = "The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring"
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possible out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ("x",)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
newer version of ``astropy.io.votable``. This may cause problems or limit the
features available when performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = (
"The VO catalog database is for a later version of astropy.io.votable"
)
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ("service", "...")
class W26(VOTableSpecWarning):
"""
The given element was not supported inside of its parent element
until the specified VOTable version; however, the version declared
in the file is for an earlier version. The element may not
be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ("child", "parent", "X.X")
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
<http://ivoa.net/Documents/latest/VOTableSTC.html>`__.
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
specified VOTable version; however, the version declared in the file is
for an earlier version. The attribute may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ("attribute", "element", "X.X")
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
when the only supported form in the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ("v1.0",)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ("x",)
class W31(VOTableSpecWarning):
"""
Since NaNs cannot be represented in integer fields directly, a null
value must be specified in the FIELD descriptor to support reading
NaNs from the tabledata.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ("x", "y")
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ("x",)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ("x",)
class W37(UnimplementedWarning):
"""
The 3 data formats defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ("x",)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ("x",)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected '{}', got '{}'"
)
default_args = ("x", "y")
class W42(VOTableSpecWarning):
"""The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""Referenced elements should be defined before referees.
From the VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ("element", "x")
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ("element",)
class W45(VOWarning, ValueError):
"""Invalid content-role attribute.
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ("x",)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ("char or unicode", "x")
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ("attribute", "element")
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ("x",)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ("x", "n-bit")
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'"
)
default_args = ("1.2",)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = "VOTABLE element must contain at least one RESOURCE element."
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'"
)
default_args = ("1.3",)
class W55(VOTableSpecWarning):
"""
This warning is issued when non-ASCII characters are detected
while reading a TABLEDATA value for a FIELD with
``datatype="char"``.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII value ({})'
)
default_args = ("", "")
class E01(VOWarning, ValueError):
"""Invalid size specifier for a field.
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters. Given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ("x", "char/unicode", "y")
class E02(VOWarning, ValueError):
"""Incorrect number of elements in array.
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. Expected multiple of {}, got {}"
)
default_args = ("x", "y")
class E03(VOWarning, ValueError):
"""Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ("x",)
class E04(VOWarning, ValueError):
"""A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ("x",)
class E05(VOWarning, ValueError):
r"""Invalid boolean value.
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ("x",)
class E06(VOWarning, ValueError):
"""Unknown datatype on a field.
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ("x", "y")
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ("x",)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ("x",)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ("FIELD",)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10-5).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ("x",)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ("x",)
class E13(VOWarning, ValueError):
r"""Invalid arraysize attribute.
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
≈2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64x64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64x64x10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ("x",)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""Incorrect ``system`` attribute on COOSYS element.
The ``system`` attribute must be one of the following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ("x",)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ("x",)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ("x",)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ("x", "y")
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ("x",)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which has datatype="char"'
)
default_args = ("", "")
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode("utf-8")
out.write(msg)
out.write("\n")
out.write("~" * len(msg))
out.write("\n\n")
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode("utf-8")
out.write(dedent(doc))
out.write("\n\n")
return out.getvalue()
warnings = generate_set("W")
exceptions = generate_set("E")
return {"warnings": warnings, "exceptions": exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes("W")])
__all__.extend([x[0] for x in _get_warning_and_exception_classes("E")])
|
73b77a629e33d68f9d6714be4785b93b203b6f7600ee532b0ce2e8080ffe1dde | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains routines to verify the correctness of UCD strings.
"""
# STDLIB
import re
# LOCAL
from astropy.utils import data
__all__ = ["parse_ucd", "check_ucd"]
class UCDWords:
"""
Manages a list of acceptable UCD words.
Works by reading in a data file exactly as provided by IVOA. This
file resides in data/ucd1p-words.txt.
"""
def __init__(self):
self._primary = set()
self._secondary = set()
self._descriptions = {}
self._capitalization = {}
with data.get_pkg_data_fileobj("data/ucd1p-words.txt", encoding="ascii") as fd:
for line in fd.readlines():
if line.startswith("#"):
continue
type, name, descr = (x.strip() for x in line.split("|"))
name_lower = name.lower()
if type in "QPEVC":
self._primary.add(name_lower)
if type in "QSEVC":
self._secondary.add(name_lower)
self._descriptions[name_lower] = descr
self._capitalization[name_lower] = name
def is_primary(self, name):
"""
Returns True if *name* is a valid primary name.
"""
return name.lower() in self._primary
def is_secondary(self, name):
"""
Returns True if *name* is a valid secondary name.
"""
return name.lower() in self._secondary
def get_description(self, name):
"""
Returns the official English description of the given UCD
*name*.
"""
return self._descriptions[name.lower()]
def normalize_capitalization(self, name):
"""
Returns the standard capitalization form of the given name.
"""
return self._capitalization[name.lower()]
_ucd_singleton = None
def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary (as required by the VOTable
specification version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r"[^A-Za-z0-9_.:;\-]", ucd)
else:
m = re.search(r"[^A-Za-z0-9_.;\-]", ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r"[A-Za-z0-9][A-Za-z0-9\-_]*"
word_re = rf"{word_component_re}(\.{word_component_re})*"
parts = ucd.split(";")
words = []
for i, word in enumerate(parts):
colon_count = word.count(":")
if colon_count == 1:
ns, word = word.split(":", 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = "ivoa"
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == "ivoa" and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words
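# A minimal illustrative sketch (not part of the original module): the kind of
# output parse_ucd produces for the canonical example UCD from the spec; the
# exact result depends on the bundled ucd1p-words.txt vocabulary.
def _example_parse_ucd():
    words = parse_ucd("pos.eq.ra;meta.main", check_controlled_vocabulary=True)
    # words -> [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]
    return words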
def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary (as required by the VOTable
specification version 1.2); otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
"""
if ucd is None:
return True
try:
parse_ucd(
ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon,
)
except ValueError:
return False
return True
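# A minimal illustrative sketch (not part of the original module): check_ucd is
# simply the boolean wrapper around parse_ucd.
def _example_check_ucd():
    assert check_ucd("pos.eq.ra;meta.main", check_controlled_vocabulary=True)
    assert not check_ucd("not a ucd!")  # spaces and '!' are invalid characters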
|
8623a1c708cb5a991907ef00dcb20a964d65950d1a1bff48b109569b638d45e3 | """
This module contains the FITS compression algorithms in numcodecs style Codecs.
"""
from gzip import compress as gzip_compress
from gzip import decompress as gzip_decompress
import numpy as np
from astropy.io.fits._tiled_compression._compression import (
compress_hcompress_1_c,
compress_plio_1_c,
compress_rice_1_c,
decompress_hcompress_1_c,
decompress_plio_1_c,
decompress_rice_1_c,
)
# If numcodecs is installed, we use Codec as a base class for the codecs below
# so that they can optionally be used as codecs in any package relying on
# numcodecs - however this is optional and if numcodecs is not installed we use
# our own base class. This does not affect any compressed data functionality
# in astropy.io.fits.
try:
from numcodecs.abc import Codec
except ImportError:
class Codec:
codec_id = None
__all__ = [
"Gzip1",
"Gzip2",
"Rice1",
"PLIO1",
"HCompress1",
"NoCompress",
]
def _as_big_endian_array(data):
return data.astype(np.asarray(data).dtype.newbyteorder(">"), copy=False)
def _as_native_endian_array(data):
if data.dtype.isnative:
return data
else:
return data.astype(np.asarray(data).dtype.newbyteorder("="), copy=False)
class NoCompress(Codec):
"""
A dummy compression/decompression algorithm that stores the data as-is.
While the data is not compressed/decompressed, it is converted to big
endian during encoding as this is what is expected in FITS files.
"""
codec_id = "FITS_NOCOMPRESS"
def decode(self, buf):
"""
Decompress buffer using the NOCOMPRESS algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
return np.frombuffer(buf, dtype=np.uint8)
def encode(self, buf):
"""
Compress the data in the buffer using the NOCOMPRESS algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
return _as_big_endian_array(buf).tobytes()
class Gzip1(Codec):
"""
The FITS GZIP 1 compression and decompression algorithm.
The Gzip algorithm is used in the free GNU software compression utility of
the same name. It was created by J. L. Gailly and M. Adler, based on the
DEFLATE algorithm (Deutsch 1996), which is a combination of LZ77 (Ziv &
Lempel 1977) and Huffman coding.
"""
codec_id = "FITS_GZIP1"
def decode(self, buf):
"""
Decompress buffer using the GZIP_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = gzip_decompress(cbytes)
return np.frombuffer(dbytes, dtype=np.uint8)
def encode(self, buf):
"""
Compress the data in the buffer using the GZIP_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# Data bytes should be stored as big endian in files
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
dbytes = _as_big_endian_array(buf).tobytes()
return gzip_compress(dbytes)
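# A minimal illustrative sketch (not part of the original codec module): a
# GZIP_1 round trip.  The decoded result comes back as raw uint8 bytes, so the
# caller is responsible for re-viewing it with the original dtype and shape.
def _example_gzip1_roundtrip():
    codec = Gzip1()
    tile = np.arange(12, dtype=np.int16).astype(">i2")  # big endian, as in FITS tiles
    restored = codec.decode(codec.encode(tile)).view(">i2")
    return np.array_equal(restored, tile)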
class Gzip2(Codec):
"""
The FITS GZIP2 compression and decompression algorithm.
The gzip2 algorithm is a variation on 'GZIP 1'. In this case the bytes in
the array of data values are shuffled so that they are arranged in order of
decreasing significance before being compressed.
For example, a five-element contiguous array of two-byte (16-bit) integer
values, with an original big-endian byte order of:
.. math::
A1 A2 B1 B2 C1 C2 D1 D2 E1 E2
will have the following byte order after shuffling:
.. math::
A1 B1 C1 D1 E1 A2 B2 C2 D2 E2,
where A1, B1, C1, D1, and E1 are the most-significant bytes from
each of the integer values.
Byte shuffling shall only be performed for integer or floating-point
numeric data types; logical, bit, and character types must not be shuffled.
Parameters
----------
itemsize
The number of bytes per value (e.g. 2 for a 16-bit integer)
"""
codec_id = "FITS_GZIP2"
def __init__(self, *, itemsize: int):
super().__init__()
self.itemsize = itemsize
def decode(self, buf):
"""
Decompress buffer using the GZIP_2 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
# Decompress first; the reshape/transpose below undoes the byte shuffle
unshuffled_buffer = gzip_decompress(cbytes)
array = np.frombuffer(unshuffled_buffer, dtype=np.uint8)
return array.reshape((self.itemsize, -1)).T.ravel()
def encode(self, buf):
"""
Compress the data in the buffer using the GZIP_2 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# Data bytes should be stored as big endian in files
array = _as_big_endian_array(buf).ravel()
# Shuffle the bytes so the most-significant bytes of every value come first
itemsize = array.dtype.itemsize
array = array.view(np.uint8)
shuffled_buffer = array.reshape((-1, itemsize)).T.ravel().tobytes()
return gzip_compress(shuffled_buffer)
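# A minimal illustrative sketch (not part of the original codec module): the
# byte shuffle described in the Gzip2 docstring, written out with plain numpy
# for a five-element 16-bit array.
def _example_gzip2_shuffle():
    values = np.array([0x0102, 0x0304, 0x0506, 0x0708, 0x090A], dtype=">u2")
    raw = values.view(np.uint8)                # A1 A2 B1 B2 C1 C2 D1 D2 E1 E2
    shuffled = raw.reshape((-1, 2)).T.ravel()  # A1 B1 C1 D1 E1 A2 B2 C2 D2 E2
    return shuffled  # array([1, 3, 5, 7, 9, 2, 4, 6, 8, 10], dtype=uint8)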
class Rice1(Codec):
"""
The FITS RICE1 compression and decompression algorithm.
The Rice algorithm [1]_ is simple and very fast. It requires only enough
memory to hold a single block of 16 or 32 pixels at a time. It codes the
pixels in small blocks and so is able to adapt very quickly to changes in
the input image statistics (e.g., Rice has no problem handling cosmic rays,
bright stars, saturated pixels, etc.).
Parameters
----------
blocksize
The blocksize to use; each tile is coded into blocks this many pixels
wide. The default value in FITS headers is 32 pixels per block.
bytepix
The number of 8-bit bytes in each original integer pixel value.
References
----------
.. [1] Rice, R. F., Yeh, P.-S., and Miller, W. H. 1993, in Proc. of the 9th
AIAA Computing in Aerospace Conf., AIAA-93-4541-CP, American Institute of
Aeronautics and Astronautics [https://doi.org/10.2514/6.1993-4541]
"""
codec_id = "FITS_RICE1"
def __init__(self, *, blocksize: int, bytepix: int, tilesize: int):
self.blocksize = blocksize
self.bytepix = bytepix
self.tilesize = tilesize
def decode(self, buf):
"""
Decompress buffer using the RICE_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(_as_native_endian_array(buf), dtype=np.uint8).tobytes()
dbytes = decompress_rice_1_c(
cbytes, self.blocksize, self.bytepix, self.tilesize
)
return np.frombuffer(dbytes, dtype=f"i{self.bytepix}")
def encode(self, buf):
"""
Compress the data in the buffer using the RICE_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# We convert the data to native endian because it is passed to the
# C compression code which will interpret it as being native endian.
dbytes = (
_as_native_endian_array(buf)
.astype(f"i{self.bytepix}", copy=False)
.tobytes()
)
return compress_rice_1_c(dbytes, self.blocksize, self.bytepix)
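# A minimal illustrative sketch (not part of the original codec module): a
# lossless RICE_1 round trip of a single 16-bit tile.  blocksize=32 and
# bytepix=2 mirror the usual FITS defaults for 16-bit data; this relies on the
# compiled _compression extension being available.
def _example_rice1_roundtrip():
    tile = np.arange(64, dtype="i2")
    codec = Rice1(blocksize=32, bytepix=2, tilesize=int(tile.size))
    restored = codec.decode(codec.encode(tile))
    return np.array_equal(restored, tile)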
class PLIO1(Codec):
"""
The FITS PLIO1 compression and decompression algorithm.
The IRAF PLIO (pixel list) algorithm was developed to store integer-valued
image masks in a compressed form. Such masks often have large regions of
constant value and hence are highly compressible. The compression algorithm
used is based on run-length encoding, with the ability to dynamically
follow level changes in the image, allowing a 16-bit encoding to be used
regardless of the image depth.
"""
codec_id = "FITS_PLIO1"
def __init__(self, *, tilesize: int):
self.tilesize = tilesize
def decode(self, buf):
"""
Decompress buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(_as_native_endian_array(buf), dtype=np.uint8).tobytes()
dbytes = decompress_plio_1_c(cbytes, self.tilesize)
return np.frombuffer(dbytes, dtype="i4")
def encode(self, buf):
"""
Compress the data in the buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# We convert the data to native endian because it is passed to the
# C compression code which will interpret it as being native endian.
dbytes = _as_native_endian_array(buf).astype("i4", copy=False).tobytes()
return compress_plio_1_c(dbytes, self.tilesize)
class HCompress1(Codec):
"""
The FITS HCompress compression and decompression algorithm.
Hcompress is the image compression package written by Richard L. White
for use at the Space Telescope Science Institute. Hcompress was used to
compress the STScI Digitized Sky Survey and has also been used to compress
the preview images in the Hubble Data Archive.
The technique gives very good compression for astronomical images and is
relatively fast. The calculations are carried out using integer arithmetic
and are entirely reversible. Consequently, the program can be used for
either lossy or lossless compression, with no special approach needed for
the lossless case.
Parameters
----------
scale
The integer scale parameter determines the amount of compression. Scale
= 0 or 1 leads to lossless compression, i.e. the decompressed image has
exactly the same pixel values as the original image. If the scale
factor is greater than 1 then the compression is lossy: the
decompressed image will not be exactly the same as the original.
smooth
At high compression factors the decompressed image begins to appear
blocky because of the way information is discarded. This blockiness
is greatly reduced, producing more pleasing images, if the image
is smoothed slightly during decompression.
References
----------
.. [1] White, R. L. 1992, in Proceedings of the NASA Space and Earth Science
Data Compression Workshop, ed. J. C. Tilton, Snowbird, UT;
https://archive.org/details/nasa_techdoc_19930016742
"""
codec_id = "FITS_HCOMPRESS1"
def __init__(self, *, scale: int, smooth: bool, bytepix: int, nx: int, ny: int):
self.scale = scale
self.smooth = smooth
self.bytepix = bytepix
# NOTE: we should probably make this less confusing, but nx is shape[0] and ny is shape[1]
self.nx = nx
self.ny = ny
def decode(self, buf):
"""
Decompress buffer using the HCOMPRESS_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(_as_native_endian_array(buf), dtype=np.uint8).tobytes()
dbytes = decompress_hcompress_1_c(
cbytes, self.nx, self.ny, self.scale, self.smooth, self.bytepix
)
# fits_hdecompress* always returns 4 byte integers irrespective of bytepix
return np.frombuffer(dbytes, dtype="i4")
def encode(self, buf):
"""
Compress the data in the buffer using the HCOMPRESS_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# We convert the data to native endian because it is passed to the
# C compression code which will interpret it as being native endian.
dbytes = (
_as_native_endian_array(buf)
.astype(f"i{self.bytepix}", copy=False)
.tobytes()
)
return compress_hcompress_1_c(
dbytes, self.nx, self.ny, self.scale, self.bytepix
)
|
a5c18845578eb212c4f7262ae2d64fd8e622dd1a306a424966419be5c3018dab | # Licensed under a 3-clause BSD style license
import os
from setuptools import Extension
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
def get_extensions():
return [
Extension(
"astropy.io.fits._tiled_compression._compression",
sources=[
os.path.join(SRC_DIR, "compression.c"),
os.path.join(SRC_DIR, "unquantize.c"),
os.path.join("cextern", "cfitsio", "lib", "pliocomp.c"),
os.path.join("cextern", "cfitsio", "lib", "ricecomp.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hcompress.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hdecompress.c"),
os.path.join("cextern", "cfitsio", "lib", "quantize.c"),
],
include_dirs=[SRC_DIR],
)
]
|
621b20e589f52ddcdb3e5e3c7b4a1ec92e6c22a324b5e609da440dda6aa9b568 | from math import ceil
import numpy as np
def _n_tiles(data_shape, tile_shape):
return [int(ceil(d / t)) for d, t in zip(data_shape, tile_shape)]
def _iter_array_tiles(
data_shape, tile_shape, first_tile_index=None, last_tile_index=None
):
ndim = len(tile_shape)
n_tiles = _n_tiles(data_shape, tile_shape)
if first_tile_index is None:
first_tile_index = (0,) * ndim
if last_tile_index is None:
last_tile_index = tuple(n - 1 for n in n_tiles)
tile_index = list(first_tile_index)
while True:
tile_slices = tuple(
slice(
(tile_index[idx] - first_tile_index[idx]) * tile_shape[idx],
(tile_index[idx] - first_tile_index[idx] + 1) * tile_shape[idx],
)
for idx in range(ndim)
)
row_index = tile_index[0]
for dim in range(1, ndim):
row_index = tile_index[dim] + row_index * n_tiles[dim]
yield row_index, tile_slices
tile_index[-1] += 1
for idx in range(ndim - 1, 0, -1):
if tile_index[idx] > last_tile_index[idx]:
tile_index[idx] = first_tile_index[idx]
tile_index[idx - 1] += 1
if tile_index[0] > last_tile_index[0]:
break
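# Illustrative trace of the generator above (assumed shapes): for
# data_shape=(4, 6) and tile_shape=(2, 3) there are 2 x 2 tiles, yielded in
# C order as
#     (0, (slice(0, 2), slice(0, 3)))
#     (1, (slice(0, 2), slice(3, 6)))
#     (2, (slice(2, 4), slice(0, 3)))
#     (3, (slice(2, 4), slice(3, 6)))
# where the first element of each pair is the row index into the compressed
# bintable.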
def _tile_shape(header):
return np.array(
[header[f"ZTILE{idx}"] for idx in range(header["ZNAXIS"], 0, -1)], dtype=int
)
def _data_shape(header):
return np.array(
[header[f"ZNAXIS{idx}"] for idx in range(header["ZNAXIS"], 0, -1)], dtype=int
)
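# For a header with ZNAXIS=2, ZNAXIS1=600, ZNAXIS2=400, ZTILE1=600 and
# ZTILE2=1 (hypothetical row-by-row tiling), _data_shape returns
# array([400, 600]) and _tile_shape returns array([1, 600]); the FITS axis
# order is reversed into numpy (C) order.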
|
7a2fab13218050a089a1871d954f32f06b391f465d6026eb315c4ed67f9aa797 | """
This module contains low level helper functions for compressing and
decompressing buffers for the Tiled Table Compression algorithms as specified in
the FITS 4 standard.
"""
import sys
from math import prod
import numpy as np
from astropy.io.fits.hdu.base import BITPIX2DTYPE
from .codecs import PLIO1, Gzip1, Gzip2, HCompress1, NoCompress, Rice1
from .quantization import DITHER_METHODS, QuantizationFailedException, Quantize
from .utils import _data_shape, _iter_array_tiles, _tile_shape
ALGORITHMS = {
"GZIP_1": Gzip1,
"GZIP_2": Gzip2,
"RICE_1": Rice1,
"RICE_ONE": Rice1,
"PLIO_1": PLIO1,
"HCOMPRESS_1": HCompress1,
"NOCOMPRESS": NoCompress,
}
DEFAULT_ZBLANK = -2147483648
__all__ = [
"compress_hdu",
"decompress_hdu_section",
]
def _decompress_tile(buf, *, algorithm: str, **settings):
"""
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).decode(buf)
def _compress_tile(buf, *, algorithm: str, **settings):
"""
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).encode(buf)
def _header_to_settings(header):
"""
Extract the settings which are constant given a header
"""
settings = {}
compression_type = header["ZCMPTYPE"]
if compression_type == "GZIP_2":
settings["itemsize"] = abs(header["ZBITPIX"]) // 8
elif compression_type in ("RICE_1", "RICE_ONE"):
settings["blocksize"] = _get_compression_setting(header, "BLOCKSIZE", 32)
settings["bytepix"] = _get_compression_setting(header, "BYTEPIX", 4)
elif compression_type == "HCOMPRESS_1":
settings["bytepix"] = 8
settings["scale"] = int(_get_compression_setting(header, "SCALE", 0))
settings["smooth"] = _get_compression_setting(header, "SMOOTH", 0)
return settings
def _update_tile_settings(settings, compression_type, actual_tile_shape):
"""
Update the settings with tile-specific settings
"""
if compression_type in ("PLIO_1", "RICE_1", "RICE_ONE"):
# We have to calculate the tilesize from the shape of the tile not the
# header, so that it's correct for edge tiles etc.
settings["tilesize"] = prod(actual_tile_shape)
elif compression_type == "HCOMPRESS_1":
# HCOMPRESS requires 2D tiles, so to find the shape of the 2D tile we
# need to ignore all length 1 tile dimensions
# Also cfitsio expects the tile shape in C order
shape_2d = tuple(nd for nd in actual_tile_shape if nd != 1)
if len(shape_2d) != 2:
raise ValueError(f"HCOMPRESS expects two dimensional tiles, got {shape_2d}")
settings["nx"] = shape_2d[0]
settings["ny"] = shape_2d[1]
return settings
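# For example (hypothetical tile shapes): an HCOMPRESS_1 tile spanning a
# single plane with actual_tile_shape=(1, 30, 40) reduces to the 2D shape
# (30, 40), giving settings["nx"]=30 and settings["ny"]=40, while a RICE_1
# tile of the same shape gets settings["tilesize"]=1200.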
def _finalize_array(tile_buffer, *, bitpix, tile_shape, algorithm, lossless):
"""
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape.
"""
tile_size = prod(tile_shape)
if algorithm.startswith("GZIP") or algorithm == "NOCOMPRESS":
        # This algorithm is taken from the bundled cfitsio code
# https://github.com/astropy/astropy/blob/a8cb1668d4835562b89c0d0b3448ac72ca44db63/cextern/cfitsio/lib/imcompress.c#L6345-L6388
tile_bytesize = len(tile_buffer)
if tile_bytesize == tile_size * 2:
dtype = ">i2"
elif tile_bytesize == tile_size * 4:
if bitpix < 0 and lossless:
dtype = ">f4"
else:
dtype = ">i4"
elif tile_bytesize == tile_size * 8:
if bitpix < 0 and lossless:
dtype = ">f8"
else:
dtype = ">i8"
else:
# Just return the raw bytes
dtype = ">u1"
tile_data = np.asarray(tile_buffer).view(dtype).reshape(tile_shape)
else:
# For RICE_1 compression the tiles that are on the edge can end up
# being padded, so we truncate excess values
if algorithm in ("RICE_1", "RICE_ONE", "PLIO_1") and tile_size < len(
tile_buffer
):
tile_buffer = tile_buffer[:tile_size]
if tile_buffer.data.format == "b":
# NOTE: this feels like a Numpy bug - need to investigate
tile_data = np.asarray(tile_buffer, dtype=np.uint8).reshape(tile_shape)
else:
tile_data = np.asarray(tile_buffer).reshape(tile_shape)
return tile_data
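# Example of the size-based dtype inference above (assumed numbers): a
# losslessly GZIP-compressed tile of shape (10, 10) with bitpix=-32
# decompresses to a 400-byte buffer and is viewed as big-endian ">f4"; the
# same tile with bitpix=16 would decompress to 200 bytes and be viewed as
# ">i2".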
def _check_compressed_header(header):
# NOTE: this could potentially be moved up into CompImageHDU, e.g. in a
# _verify method.
# Check for overflows which might cause issues when calling C code
for kw in ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.intc).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["ZNAXIS"] + 1):
for kw_name in ["ZNAXIS", "ZTILE"]:
kw = f"{kw_name}{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["NAXIS"] + 1):
kw = f"NAXIS{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["TNULL1", "PCOUNT", "THEAP"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["ZVAL3"]:
if kw in header:
if header[kw] > np.finfo(np.float32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
# Validate data types
for kw in ["ZSCALE", "ZZERO", "TZERO1", "TSCAL1"]:
if kw in header:
if not np.isreal(header[kw]):
raise TypeError(f"{kw} should be floating-point")
for kw in ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]:
if kw in header:
if not isinstance(header[kw], str):
raise TypeError(f"{kw} should be a string")
for kw in ["ZDITHER0"]:
if kw in header:
if not np.isreal(header[kw]) or not float(header[kw]).is_integer():
raise TypeError(f"{kw} should be an integer")
if "TFORM1" in header:
for valid in ["1PB", "1PI", "1PJ", "1QB", "1QI", "1QJ"]:
if header["TFORM1"].startswith(valid):
break
else:
raise RuntimeError(f"Invalid TFORM1: {header['TFORM1']}")
# Check values
for kw in ["TFIELDS", "PCOUNT"] + [
f"NAXIS{idx + 1}" for idx in range(header["NAXIS"])
]:
if kw in header:
if header[kw] < 0:
raise ValueError(f"{kw} should not be negative.")
for kw in ["ZNAXIS", "TFIELDS"]:
if kw in header:
if header[kw] < 0 or header[kw] > 999:
raise ValueError(f"{kw} should be in the range 0 to 999")
if header["ZBITPIX"] not in [8, 16, 32, 64, -32, -64]:
raise ValueError(f"Invalid value for BITPIX: {header['ZBITPIX']}")
if header["ZCMPTYPE"] not in ALGORITHMS:
raise ValueError(f"Unrecognized compression type: {header['ZCMPTYPE']}")
# Check that certain keys are present
header["ZNAXIS"]
header["ZBITPIX"]
def _get_compression_setting(header, name, default):
# Settings for the various compression algorithms are stored in pairs of
# keywords called ZNAME? and ZVAL? - a given compression setting could be
    # in any ZNAME? so we need to check through all the possible ZNAMEs to see
    # which one matches the required setting.
for i in range(1, 1000):
if f"ZNAME{i}" not in header:
break
if header[f"ZNAME{i}"].lower() == name.lower():
return header[f"ZVAL{i}"]
return default
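# Illustrative lookup (hypothetical header values): with ZNAME1='NOISEBIT',
# ZVAL1=16, ZNAME2='SCALE' and ZVAL2=1 in the header,
# _get_compression_setting(header, "scale", 0) matches ZNAME2
# case-insensitively and returns 1, while a name that appears in no ZNAMEn
# keyword falls back to the supplied default.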
def _column_dtype(hdu, column_name):
tform = hdu.columns[column_name].format
if tform[2] == "B":
dtype = np.uint8
elif tform[2] == "I":
dtype = ">i2"
elif tform[2] == "J":
dtype = ">i4"
return np.dtype(dtype)
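# Typical inputs (per the TFORM1 values accepted by _check_compressed_header):
# a COMPRESSED_DATA column with TFORM "1PB(...)" has tform[2] == "B" and maps
# to uint8, while "1PI(...)" and "1PJ(...)" map to big-endian int16 and int32.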
def _get_data_from_heap(hdu, size, offset, dtype, heap_cache=None):
if heap_cache is None:
return hdu._get_raw_data(size, dtype, hdu._data_offset + hdu._theap + offset)
else:
itemsize = dtype.itemsize
data = heap_cache[offset : offset + size * itemsize]
if itemsize > 1:
return data.view(dtype)
else:
return data
def decompress_hdu_section(hdu, first_tile_index, last_tile_index):
"""
    Decompress a section of the data in a `~astropy.io.fits.CompImageHDU`.
    Parameters
    ----------
    hdu : `astropy.io.fits.CompImageHDU`
        Input HDU to decompress the data for.
    first_tile_index, last_tile_index : array of int
        The first and last tile index (inclusive) along each dimension of
        the section to decompress.
Returns
-------
data : `numpy.ndarray`
The decompressed data array.
"""
_check_compressed_header(hdu._header)
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
first_array_index = first_tile_index * tile_shape
last_array_index = (last_tile_index + 1) * tile_shape
last_array_index = np.minimum(data_shape, last_array_index)
buffer_shape = tuple((last_array_index - first_array_index).astype(int))
data = np.empty(buffer_shape, dtype=BITPIX2DTYPE[hdu._header["ZBITPIX"]])
quantized = "ZSCALE" in hdu.compressed_data.dtype.names
if data.size == 0:
return data
settings = _header_to_settings(hdu._header)
compression_type = hdu.compression_type
zbitpix = hdu._header["ZBITPIX"]
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
# NOTE: in the following and below we convert the column to a Numpy array
# for performance reasons, as accessing rows from a FITS_rec column is
# otherwise slow.
compressed_data_column = np.array(hdu.compressed_data["COMPRESSED_DATA"])
compressed_data_dtype = _column_dtype(hdu, "COMPRESSED_DATA")
if "ZBLANK" in hdu.columns.dtype.names:
zblank_column = np.array(hdu.compressed_data["ZBLANK"])
else:
zblank_column = None
if "ZSCALE" in hdu.columns.dtype.names:
zscale_column = np.array(hdu.compressed_data["ZSCALE"])
else:
zscale_column = None
if "ZZERO" in hdu.columns.dtype.names:
zzero_column = np.array(hdu.compressed_data["ZZERO"])
else:
zzero_column = None
zblank_header = hdu._header.get("ZBLANK", None)
gzip_compressed_data_column = None
gzip_compressed_data_dtype = None
# If all the data is requested, read in all the heap.
if tuple(buffer_shape) == tuple(data_shape):
heap_cache = hdu._get_raw_data(
hdu._header["PCOUNT"], np.uint8, hdu._data_offset + hdu._theap
)
else:
heap_cache = None
for row_index, tile_slices in _iter_array_tiles(
data_shape, tile_shape, first_tile_index, last_tile_index
):
# For tiles near the edge, the tile shape from the header might not be
# correct so we have to pass the shape manually.
actual_tile_shape = data[tile_slices].shape
settings = _update_tile_settings(settings, compression_type, actual_tile_shape)
if compressed_data_column[row_index][0] == 0:
if gzip_compressed_data_column is None:
gzip_compressed_data_column = np.array(
hdu.compressed_data["GZIP_COMPRESSED_DATA"]
)
gzip_compressed_data_dtype = _column_dtype(hdu, "GZIP_COMPRESSED_DATA")
# When quantizing floating point data, sometimes the data will not
# quantize efficiently. In these cases the raw floating point data can
# be losslessly GZIP compressed and stored in the `GZIP_COMPRESSED_DATA`
# column.
cdata = _get_data_from_heap(
hdu,
*gzip_compressed_data_column[row_index],
gzip_compressed_data_dtype,
heap_cache=heap_cache,
)
tile_buffer = _decompress_tile(cdata, algorithm="GZIP_1")
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm="GZIP_1",
lossless=True,
)
else:
cdata = _get_data_from_heap(
hdu,
*compressed_data_column[row_index],
compressed_data_dtype,
heap_cache=heap_cache,
)
if compression_type == "GZIP_2":
# Decompress with GZIP_1 just to find the total number of
# elements in the uncompressed data.
# TODO: find a way to avoid doing this for all tiles
tile_data = np.asarray(_decompress_tile(cdata, algorithm="GZIP_1"))
settings["itemsize"] = tile_data.size // int(prod(actual_tile_shape))
tile_buffer = _decompress_tile(
cdata, algorithm=compression_type, **settings
)
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm=compression_type,
lossless=not quantized,
)
if zblank_column is None:
zblank = zblank_header
else:
zblank = zblank_column[row_index]
if zblank is not None:
blank_mask = tile_data == zblank
if quantized:
q = Quantize(
row=(row_index + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=None,
bitpix=zbitpix,
)
tile_data = np.asarray(
q.decode_quantized(
tile_data, zscale_column[row_index], zzero_column[row_index]
)
).reshape(actual_tile_shape)
if zblank is not None:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
tile_data[blank_mask] = np.nan
data[tile_slices] = tile_data
return data
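# Sketch of how the tile indices relate to array indices (assumed values):
# with tile_shape=(100, 100), passing first_tile_index=np.array([0, 0]) and
# last_tile_index=np.array([1, 1]) decompresses the tiles covering
# data[0:200, 0:200] (clipped to the data shape for edge tiles) and returns
# only that section as a contiguous array.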
def compress_hdu(hdu):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
hdu : `astropy.io.fits.CompImageHDU`
Input HDU to compress the data for.
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(hdu.data, np.ndarray):
raise TypeError("CompImageHDU.data must be a numpy.ndarray")
_check_compressed_header(hdu._header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(hdu._header, "noisebit", 0)
settings = _header_to_settings(hdu._header)
compression_type = hdu.compression_type
for irow, tile_slices in _iter_array_tiles(data_shape, tile_shape):
data = hdu.data[tile_slices]
settings = _update_tile_settings(settings, compression_type, data.shape)
quantize = "ZSCALE" in hdu.columns.dtype.names
if data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=hdu._header["ZBITPIX"],
)
original_shape = data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
data = data.copy()
if np.all(nan_mask):
data[nan_mask] = 0
else:
data[nan_mask] = np.nanmin(data)
try:
data, scale, zero = q.encode_quantized(data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
data = np.asarray(data).reshape(original_shape)
if any_nan:
if not data.flags.writeable:
data = data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
if gzip_fallback[-1]:
cbytes = _compress_tile(data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(data, algorithm=compression_type, **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
hdu._header["ZBLANK"] = zblank
table = np.zeros(len(compressed_bytes), dtype=hdu.columns.dtype.newbyteorder(">"))
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
    # the real size; it is not clear whether this is deliberate or a bug somewhere.
if compression_type == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if compression_type == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2", copy=False).tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
if len(table_bytes) != hdu._theap:
raise Exception(
f"Unexpected compressed table size (expected {hdu._theap}, got {len(table_bytes)})"
)
heap = table.tobytes() + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
|
d5344de6cd1e7390682ecbe71479a4acb166f8fde4ea3e05fecadec505b9a984 | """
This file contains the code for Quantizing / Dequantizing floats.
"""
import numpy as np
from astropy.io.fits._tiled_compression._compression import (
quantize_double_c,
quantize_float_c,
unquantize_double_c,
unquantize_float_c,
)
from astropy.io.fits.hdu.base import BITPIX2DTYPE
__all__ = ["Quantize"]
DITHER_METHODS = {
"NONE": 0,
"NO_DITHER": -1,
"SUBTRACTIVE_DITHER_1": 1,
"SUBTRACTIVE_DITHER_2": 2,
}
class QuantizationFailedException(Exception):
pass
class Quantize:
"""
Quantization of floating-point data following the FITS standard.
"""
def __init__(
self, *, row: int, dither_method: int, quantize_level: int, bitpix: int
):
super().__init__()
self.row = row
# TODO: pass dither method as a string instead of int?
self.quantize_level = quantize_level
self.dither_method = dither_method
self.bitpix = bitpix
# NOTE: below we use decode_quantized and encode_quantized instead of
# decode and encode as we need to break with the numcodec API and take/return
# scale and zero in addition to quantized value. We should figure out how
# to properly use the numcodec API for this use case.
def decode_quantized(self, buf, scale, zero):
"""
Unquantize data.
Parameters
----------
buf : bytes or array_like
The buffer to unquantize.
Returns
-------
np.ndarray
The unquantized buffer.
"""
qbytes = np.asarray(buf)
qbytes = qbytes.astype(qbytes.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if self.dither_method == -1:
# For NO_DITHER we should just use the scale and zero directly
return qbytes * scale + zero
if self.bitpix == -32:
ubytes = unquantize_float_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
elif self.bitpix == -64:
ubytes = unquantize_double_c(
qbytes.tobytes(),
self.row,
qbytes.size,
scale,
zero,
self.dither_method,
0,
0,
0.0,
qbytes.dtype.itemsize,
)
else:
raise TypeError("bitpix should be one of -32 or -64")
return np.frombuffer(ubytes, dtype=BITPIX2DTYPE[self.bitpix]).data
def encode_quantized(self, buf):
"""
Quantize data.
Parameters
----------
buf : bytes or array_like
The buffer to quantize.
Returns
-------
np.ndarray
A buffer with quantized data.
"""
uarray = np.asarray(buf)
uarray = uarray.astype(uarray.dtype.newbyteorder("="))
# TODO: figure out if we need to support null checking
if uarray.dtype.itemsize == 4:
qbytes, status, scale, zero = quantize_float_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
elif uarray.dtype.itemsize == 8:
qbytes, status, scale, zero = quantize_double_c(
uarray.tobytes(),
self.row,
uarray.size,
1,
0,
0,
self.quantize_level,
self.dither_method,
)[:4]
if status == 0:
raise QuantizationFailedException()
else:
return np.frombuffer(qbytes, dtype=np.int32), scale, zero
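# A minimal round-trip sketch (illustrative; the arguments mirror how the
# tiled-compression code drives this class, and quantize_level=16 is an
# arbitrary assumption):
#
#     import numpy as np
#     q = Quantize(row=1, dither_method=DITHER_METHODS["SUBTRACTIVE_DITHER_1"],
#                  quantize_level=16, bitpix=-32)
#     data = np.random.normal(size=(32, 32)).astype(np.float32)
#     quantized, scale, zero = q.encode_quantized(data)
#     restored = np.asarray(q.decode_quantized(quantized, scale, zero))
#     # restored approximates data to within about scale/2 per pixel; very
#     # smooth data may instead raise QuantizationFailedException.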
|
c535999e4859c942daf1df548e24e5d6a87104f6059fb2ba2568f6bae68338a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsheader`` is a command line script based on astropy.io.fits for printing
the header(s) of one or more FITS file(s) to the standard output in a human-
readable format.
Example uses of fitsheader:
1. Print the header of all the HDUs of a .fits file::
$ fitsheader filename.fits
2. Print the header of the third and fifth HDU extension::
$ fitsheader --extension 3 --extension 5 filename.fits
3. Print the header of a named extension, e.g. select the HDU containing
keywords EXTNAME='SCI' and EXTVER='2'::
$ fitsheader --extension "SCI,2" filename.fits
4. Print only specific keywords::
$ fitsheader --keyword BITPIX --keyword NAXIS filename.fits
5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::
$ fitsheader --keyword NAXIS* filename.fits
6. Dump the header keywords of all the files in the current directory into a
machine-readable csv file::
$ fitsheader --table ascii.csv *.fits > keywords.csv
7. Specify hierarchical keywords with the dotted or spaced notation::
$ fitsheader --keyword ESO.INS.ID filename.fits
$ fitsheader --keyword "ESO INS ID" filename.fits
8. Compare the headers of different fits files, following ESO's ``fitsort``
format::
$ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits
9. Same as above, sorting the output along a specified keyword::
$ fitsheader -f -s DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits
10. Sort first by OBJECT, then DATE-OBS::
$ fitsheader -f -s OBJECT -s DATE-OBS *.fits
Note that compressed images (HDUs of type
:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real
BINTABLE header to describe the compressed data, and a fake IMAGE header
representing the image that was compressed. Astropy returns the latter by
default. You must supply the ``--compressed`` option if you require the real
header that describes the compression.
With Astropy installed, please run ``fitsheader --help`` to see the full usage
documentation.
"""
import argparse
import sys
import numpy as np
from astropy import __version__, log
from astropy.io import fits
DESCRIPTION = """
Print the header(s) of a FITS file. Optional arguments allow the desired
extension(s), keyword(s), and output format to be specified.
Note that in the case of a compressed image, the decompressed header is
shown by default.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsheader
for further documentation.
""".strip()
class ExtensionNotFoundException(Exception):
"""Raised if an HDU extension requested by the user does not exist."""
pass
class HeaderFormatter:
"""Class to format the header(s) of a FITS file for display by the
`fitsheader` tool; essentially a wrapper around a `HDUList` object.
Example usage:
fmt = HeaderFormatter('/path/to/file.fits')
print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))
Parameters
----------
filename : str
Path to a single FITS file.
verbose : bool
Verbose flag, to show more information about missing extensions,
keywords, etc.
Raises
------
OSError
If `filename` does not exist or cannot be read.
"""
def __init__(self, filename, verbose=True):
self.filename = filename
self.verbose = verbose
self._hdulist = fits.open(filename)
def parse(self, extensions=None, keywords=None, compressed=False):
"""Returns the FITS file header(s) in a readable format.
Parameters
----------
extensions : list of int or str, optional
Format only specific HDU(s), identified by number or name.
The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER"
keywords.
keywords : list of str, optional
Keywords for which the value(s) should be returned.
If not specified, then the entire header is returned.
compressed : bool, optional
If True, shows the header describing the compression, rather than
the header obtained after decompression. (Affects FITS files
containing `CompImageHDU` extensions only.)
Returns
-------
formatted_header : str or astropy.table.Table
Traditional 80-char wide format in the case of `HeaderFormatter`;
an Astropy Table object in the case of `TableHeaderFormatter`.
"""
# `hdukeys` will hold the keys of the HDUList items to display
if extensions is None:
hdukeys = range(len(self._hdulist)) # Display all by default
else:
hdukeys = []
for ext in extensions:
try:
# HDU may be specified by number
hdukeys.append(int(ext))
except ValueError:
# The user can specify "EXTNAME" or "EXTNAME,EXTVER"
parts = ext.split(",")
if len(parts) > 1:
extname = ",".join(parts[0:-1])
extver = int(parts[-1])
hdukeys.append((extname, extver))
else:
hdukeys.append(ext)
# Having established which HDUs the user wants, we now format these:
return self._parse_internal(hdukeys, keywords, compressed)
def _parse_internal(self, hdukeys, keywords, compressed):
"""The meat of the formatting; in a separate method to allow overriding."""
result = []
for idx, hdu in enumerate(hdukeys):
try:
cards = self._get_cards(hdu, keywords, compressed)
except ExtensionNotFoundException:
continue
if idx > 0: # Separate HDUs by a blank line
result.append("\n")
result.append(f"# HDU {hdu} in {self.filename}:\n")
for c in cards:
result.append(f"{c}\n")
return "".join(result)
def _get_cards(self, hdukey, keywords, compressed):
"""Returns a list of `astropy.io.fits.card.Card` objects.
This function will return the desired header cards, taking into
account the user's preference to see the compressed or uncompressed
version.
Parameters
----------
hdukey : int or str
Key of a single HDU in the HDUList.
keywords : list of str, optional
Keywords for which the cards should be returned.
compressed : bool, optional
If True, shows the header describing the compression.
Raises
------
ExtensionNotFoundException
If the hdukey does not correspond to an extension.
"""
# First we obtain the desired header
try:
if compressed:
# In the case of a compressed image, return the header before
# decompression (not the default behavior)
header = self._hdulist[hdukey]._header
else:
header = self._hdulist[hdukey].header
except (IndexError, KeyError):
message = f"{self.filename}: Extension {hdukey} not found."
if self.verbose:
log.warning(message)
raise ExtensionNotFoundException(message)
if not keywords: # return all cards
cards = header.cards
else: # specific keywords are requested
cards = []
for kw in keywords:
try:
crd = header.cards[kw]
if isinstance(crd, fits.card.Card): # Single card
cards.append(crd)
else: # Allow for wildcard access
cards.extend(crd)
except KeyError: # Keyword does not exist
if self.verbose:
log.warning(
f"{self.filename} (HDU {hdukey}): Keyword {kw} not found."
)
return cards
def close(self):
self._hdulist.close()
class TableHeaderFormatter(HeaderFormatter):
"""Class to convert the header(s) of a FITS file into a Table object.
The table returned by the `parse` method will contain four columns:
filename, hdu, keyword, and value.
Subclassed from HeaderFormatter, which contains the meat of the formatting.
"""
def _parse_internal(self, hdukeys, keywords, compressed):
"""Method called by the parse method in the parent class."""
tablerows = []
for hdu in hdukeys:
try:
for card in self._get_cards(hdu, keywords, compressed):
tablerows.append(
{
"filename": self.filename,
"hdu": hdu,
"keyword": card.keyword,
"value": str(card.value),
}
)
except ExtensionNotFoundException:
pass
if tablerows:
from astropy import table
return table.Table(tablerows)
return None
def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keyword:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(
formatter.parse(args.extensions, args.keyword, args.compressed), end=""
)
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close()
def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table)
def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl["keyword"] != "")]
else:
tbl = table.Table([[filename]], names=("filename",))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
hdus = resulting_table["hdu"]
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column([f"{row['hdu']}:{row['keyword']}" for row in tab])
tab.add_column(new_column, name="hdu+keyword")
keyword_column_name = "hdu+keyword"
else:
keyword_column_name = "keyword"
# Check how many hdus we are processing
final_tables = []
for tab in tables:
final_table = [table.Column([tab["filename"][0]], name="filename")]
if "value" in tab.colnames:
for row in tab:
if row["keyword"] in ("COMMENT", "HISTORY"):
continue
final_table.append(
table.Column([row["value"]], name=row[keyword_column_name])
)
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.sort:
final_table.sort(args.sort)
# Reorganise to keyword by columns
final_table.pprint(max_lines=-1, max_width=-1)
def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"-e",
"--extension",
metavar="HDU",
action="append",
dest="extensions",
help=(
"specify the extension by name or number; this argument can "
"be repeated to select multiple extensions"
),
)
parser.add_argument(
"-k",
"--keyword",
metavar="KEYWORD",
action="append",
type=str,
help=(
"specify a keyword; this argument can be repeated to select "
"multiple keywords; also supports wildcards"
),
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
"-t",
"--table",
nargs="?",
default=False,
metavar="FORMAT",
help=(
"print the header(s) in machine-readable table format; the "
'default format is "ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)'
),
)
mode_group.add_argument(
"-f",
"--fitsort",
action="store_true",
help=(
"print the headers as a table with each unique "
"keyword in a given column (fitsort format) "
),
)
parser.add_argument(
"-s",
"--sort",
metavar="SORT_KEYWORD",
action="append",
type=str,
        help=(
            "sort output by the specified header keywords; can be repeated to "
            "sort by multiple keywords; only supported with -f/--fitsort"
),
)
parser.add_argument(
"-c",
"--compressed",
action="store_true",
help=(
"for compressed image data, show the true header which describes "
"the compression rather than the data"
),
)
parser.add_argument(
"filename",
nargs="+",
help="path to one or more files; wildcards are supported",
)
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = "ascii.fixed_width"
if args.sort:
args.sort = [key.replace(".", " ") for key in args.sort]
if not args.fitsort:
log.error(
"Sorting with -s/--sort is only supported in conjunction with"
" -f/--fitsort"
)
# 2: Unix error convention for command line syntax
sys.exit(2)
if args.keyword:
args.keyword = [key.replace(".", " ") for key in args.keyword]
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass
|
150ed688b985727e8c1cc9cbe8bdc34bcf6cb9e4c477e50570c39931c950b997 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS file(s) to the
standard output.
Example usage of ``fitsinfo``:
1. Print a summary of the HDUs in a FITS file::
$ fitsinfo filename.fits
Filename: filename.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 138 ()
1 SCI ImageHDU 61 (800, 800) int16
2 SCI ImageHDU 61 (800, 800) int16
3 SCI ImageHDU 61 (800, 800) int16
4 SCI ImageHDU 61 (800, 800) int16
2. Print a summary of HDUs of all the FITS files in the current directory::
$ fitsinfo *.fits
"""
import argparse
import astropy.io.fits as fits
from astropy import __version__, log
DESCRIPTION = """
Print a summary of the HDUs in one or more FITS files.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsinfo
for further documentation.
""".strip()
def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
"""
try:
fits.info(filename)
except OSError as e:
log.error(str(e))
def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"filename",
nargs="+",
help="Path to one or more FITS files. Wildcards are supported.",
)
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename)
|
ac8451efd6a45e31c81efb45b1c6008b970161bc3af48f89ba4b0e429c23b9ee | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (
ATTRIBUTE_TO_KEYWORD,
FITS2NUMPY,
KEYWORD_NAMES,
KEYWORD_TO_ATTRIBUTE,
TDEF_RE,
ColDefs,
Column,
_AsciiColDefs,
_cmp_recformats,
_convert_format,
_FormatP,
_FormatQ,
_makep,
_parse_tformat,
_scalar_to_format,
)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num, path_like
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from .base import DELAYED, ExtensionHDU, _ValidHDU
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = " "
lineterminator = "\n"
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
# The following flag can be used by subclasses to determine whether to load
# variable length data from the heap automatically or whether the columns
# should contain the size and offset in the heap and let the subclass
# decide when to load the data from the heap. This can be used for example
# in CompImageHDU to only load data tiles that are needed.
_load_variable_length_data = True
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(
cls,
columns,
header=None,
nrows=0,
fill=False,
character_as_bytes=False,
**kwargs,
):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object, return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
        An optional `Header` object to instantiate the new HDU with. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, will fill all cells with zeros or blanks. If `False`,
            copy the data from the input; undefined cells will still be
            filled with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(
coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes
)
hdu = cls(
data=data, header=header, character_as_bytes=character_as_bytes, **kwargs
)
coldefs._add_listener(hdu)
return hdu
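    # A short usage sketch (hedged: in practice this is called on a concrete
    # subclass such as BinTableHDU rather than on this abstract class):
    #
    #     import numpy as np
    #     from astropy.io import fits
    #     col = fits.Column(name="flux", format="E", array=np.ones(3, dtype="f4"))
    #     hdu = fits.BinTableHDU.from_columns([col])
    #     # hdu.columns is a ColDefs built from col and hdu.data has 3 rows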
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
        Table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (
self._load_variable_length_data
and any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats)
and self._data_size is not None
and self._data_size > self._theap
):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset)
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data = raw_data[:tbsize].view(dtype=columns.dtype, type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # we just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
data._load_variable_length_data = self._load_variable_length_data
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder(">")
# hack to enable pseudo-uint support
data._uint = self._uint
# pass datLoc, for P format
data._heapoffset = self._theap
data._heapsize = self._header["PCOUNT"]
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError("No header to setup HDU.")
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
            # construct a list of cards for a minimal header
cards = [
("XTENSION", self._extension, self._ext_comment),
("BITPIX", 8, "array data type"),
("NAXIS", 2, "number of array dimensions"),
("NAXIS1", 0, "length of dimension 1"),
("NAXIS2", 0, "length of dimension 2"),
("PCOUNT", 0, "number of group parameters"),
("GCOUNT", 1, "number of groups"),
("TFIELDS", 0, "number of table fields"),
]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
                # may get modified. The data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ", ".join(x + "n" for x in sorted(future_ignore))
warnings.warn(
"The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys),
AstropyDeprecationWarning,
)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header["NAXIS1"] = self.data._raw_itemsize
self._header["NAXIS2"] = self.data.shape[0]
self._header["TFIELDS"] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if "data" in self.__dict__:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ",".join(self.columns._recformats)
data = np.rec.array(
None, formats=formats, names=self.columns.names, shape=0
)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if "data" in self.__dict__:
self.columns._remove_listener(self.__dict__["data"])
self.__dict__["data"] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get("NAXIS2", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header["NAXIS1"] * self._header["NAXIS2"]
return self._header.get("THEAP", size)
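    # For example (hypothetical values): a table with NAXIS1=1440, NAXIS2=100
    # and no THEAP card has its heap start at byte 144000, i.e. immediately
    # after the main table data.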
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set("NAXIS1", self.data._raw_itemsize, after="NAXIS")
self._header.set("NAXIS2", self.data.shape[0], after="NAXIS1")
self._header.set("TFIELDS", len(self.columns), after="GCOUNT")
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(), header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header["TFIELDS"] = len(self.data._coldefs)
self._header["NAXIS2"] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
heapstart = self._header.get("THEAP", tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header["PCOUNT"] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat, max=_max)
self._header["TFORM" + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option="warn"):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if len(self._header) > 1:
if not (
isinstance(self._header[0], str)
and self._header[0].rstrip() == self._extension
):
err_text = "The XTENSION keyword must match the HDU type."
fix_text = f"Converted the XTENSION keyword to {self._extension}."
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
self.req_cards("NAXIS", None, lambda v: (v == 2), 2, option, errs)
self.req_cards("BITPIX", None, lambda v: (v == 8), 8, option, errs)
self.req_cards(
"TFIELDS",
7,
lambda v: (_is_int(v) and v >= 0 and v <= 999),
0,
option,
errs,
)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TFORM" + str(idx + 1), None, None, None, option, errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header["NAXIS2"]
ncols = self._header["TFIELDS"]
format = ", ".join(
[self._header["TFORM" + str(j + 1)] for j in range(ncols)]
)
format = f"[{format}]"
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(
self, column, col_idx, attr, old_value, new_value
):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(
before_keyword, (keyword, new_value), after=True
)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1 :]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword, (keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
        Otherwise, clear the keywords for all columns.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
continue
num = int(match.group("num")) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword, num))
# First delete
rev_sorted_idx_0 = sorted(
table_keywords, key=operator.itemgetter(0), reverse=True
)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value, old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if "TFIELDS" in self._header:
self._header["TFIELDS"] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "TABLE"
_ext_comment = "ASCII table extension"
_padding_byte = " "
_columns_type = _AsciiColDefs
__format_RE = re.compile(r"(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?")
def __init__(
self, data=None, header=None, name=None, ver=None, character_as_bytes=False
):
super().__init__(
data, header, name=name, ver=ver, character_as_bytes=character_as_bytes
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
names = [n for idx, n in enumerate(columns.names)]
# determine if there are duplicate field names and, if there
# are, throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = "S" + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header["NAXIS1"] > itemsize:
data_type = "S" + str(
columns.spans[idx] + self._header["NAXIS1"] - itemsize
)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
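# Minimal sketch of the dtype mapping built above, using hypothetical column
# spans/starts for a two-column ASCII table: each field becomes a byte string
# of width ``span`` located at the zero-based offset ``start - 1`` in the row
# (the last-column padding to NAXIS1 is omitted here).
#
#     >>> spans, starts, names = [10, 8], [1, 12], ["A", "B"]
#     >>> {n: ("S" + str(s), st - 1) for n, s, st in zip(names, spans, starts)}
#     {'A': ('S10', 0), 'B': ('S8', 11)}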
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b" ", dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option="warn"):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards("PCOUNT", None, lambda v: (v == 0), 0, option, errs)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TBCOL" + str(idx + 1), None, _is_int, None, option, errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "BINTABLE"
_ext_comment = "binary table extension"
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
if data is not None and data is not DELAYED:
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(
data,
header,
name=name,
uint=uint,
ver=ver,
character_as_bytes=character_as_bytes,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension in (cls._extension, "A3DTABLE")
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data.
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
# contribute to the checksum)
if data._get_raw_data() is None:
# This block is still needed because
# test_variable_length_table_data leads to ._get_raw_data
# returning None which means _get_heap_data doesn't work.
# Which happens when the data is loaded in memory rather than
# being unloaded on disk
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
# Write out the heap of variable length array columns; this has
# to be done after the "regular" data is written (above).
# To avoid a bug in the Lustre filesystem client, don't
# write 0-byte objects.
if data._gap > 0:
fileobj.write((data._gap * "\0").encode("ascii"))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx], _FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx) for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
# `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == "U":
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i + 1 :])
item = np.char.encode(item, "ascii")
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j + 1 :])
# Fix padding problem (see #5296).
padding = "\x00" * (field_width - item_length)
fileobj.write(padding.encode("ascii"))
_tdump_file_format = textwrap.dedent(
"""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
'Q' format due to difficult-to-overcome ambiguities. What this
means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
eight sixteen-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
"""
)
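# Hedged usage sketch for the dump/load round trip documented above; the
# file names are hypothetical and ``hdul`` is assumed to be an already-open
# HDUList whose second HDU is a binary table:
#
#     >>> table_hdu = hdul[1]
#     >>> table_hdu.dump("events.txt", cdfile="events_cd.txt",
#     ...                hfile="events_h.txt", overwrite=True)
#     >>> new_hdu = BinTableHDU.load("events.txt", cdfile="events_cd.txt",
#     ...                            hfile="events_h.txt")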
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
"""
if isinstance(datafile, path_like):
datafile = os.path.expanduser(datafile)
if isinstance(cdfile, path_like):
cdfile = os.path.expanduser(cdfile)
if isinstance(hfile, path_like):
hfile = os.path.expanduser(hfile)
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, path_like):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(
" ".join([f"File '{f}' already exists." for f in exist])
+ " If you mean to replace the file(s) then use the argument "
"'overwrite=True'."
)
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep="\n", endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace("\n", "\n ")
def load(cls, datafile, cdfile=None, hfile=None, replace=False, header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent, the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this object's header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
values not present in this Header, unless ``replace=True``, in which case
this Header's values are completely replaced with the values from
hfile.
Notes
-----
The primary use for the `load` method is to allow the input of ASCII
data that was edited in a standard text editor of the table data and
parameters. The `dump` method can be used to create the initial ASCII
files.
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(
Header.fromtextfile(hfile), update=True, update_first=True
)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace("\n", "\n ")
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + ".txt"
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == "S":
itemsize = int(format[1:])
return "{:{size}}".format(val, size=itemsize)
elif format in np.typecodes["AllInteger"]:
# output integer
return f"{val:21d}"
elif format in np.typecodes["Complex"]:
return f"{val.real:21.15g}+{val.imag:.15g}j"
elif format in np.typecodes["Float"]:
# output floating point
return f"{val:#21.15g}"
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append("VLA_Length=")
line.append(f"{len(row[column.name]):21d}")
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == "V":
array_format = dtype.base.char
if array_format == "S":
array_format += str(dtype.itemsize)
if dtype.char == "V":
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name], array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ["disp", "unit", "dim", "null", "bscale", "bzero"]
line += [
"{!s:16s}".format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)
]
fileobj.write(" ".join(line))
fileobj.write("\n")
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == "VLA_Length=":
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(
np.recarray(shape=1, dtype=dtype), nrows=nrows, fill=True
)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)) :]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY["L"]:
return bool(int(val))
elif recformats[col] == FITS2NUMPY["M"]:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == "VLA_Length=":
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx : idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [
format_value(col, val) for val in line[slice_]
]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ["name", "format", "disp", "unit", "dim"]:
kwargs[key] = words.pop(0).replace('""', "")
for key in ["null", "bscale", "bzero"]:
word = words.pop(0).replace('""', "")
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (
not isinstance(c, chararray.chararray)
and c.itemsize > 1
and c.dtype.str[0] in swap_types
):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({"names": names, "formats": formats, "offsets": offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
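# Hedged usage sketch: this context manager is meant to wrap whole-table
# writes and checksum calculations so that columns are big-endian inside the
# block and restored afterwards (``table_data`` is an assumed FITS_rec
# instance and ``compute_checksum`` a hypothetical helper):
#
#     >>> with _binary_table_byte_swap(table_data) as swapped:
#     ...     checksum = compute_checksum(
#     ...         swapped.view(type=np.ndarray, dtype=np.ubyte))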
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import mmap
import sys
import warnings
import numpy as np
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_is_dask_array,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU, _ValidHDU
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
"SIMPLE": "conforms to FITS standard",
"XTENSION": "Image extension",
"BITPIX": "array data type",
"NAXIS": "number of array dimensions",
"GROUPS": "has groups",
"PCOUNT": "number of parameters",
"GCOUNT": "number of groups",
}
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
uint=True,
scale_back=False,
ignore_blank=False,
**kwargs,
):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError("No header to setup HDU.")
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ("XTENSION", "IMAGE", self.standard_keyword_comments["XTENSION"])
else:
c0 = ("SIMPLE", True, self.standard_keyword_comments["SIMPLE"])
cards = [
c0,
("BITPIX", 8, self.standard_keyword_comments["BITPIX"]),
("NAXIS", 0, self.standard_keyword_comments["NAXIS"]),
]
if isinstance(self, GroupsHDU):
cards.append(("GROUPS", True, self.standard_keyword_comments["GROUPS"]))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(("PCOUNT", 0, self.standard_keyword_comments["PCOUNT"]))
cards.append(("GCOUNT", 1, self.standard_keyword_comments["GCOUNT"]))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = "BZERO" in self._header
bscale_in_header = "BSCALE" in self._header
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [
self._header.get("NAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("NAXIS", 0))
]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get("BITPIX")
self._gcount = self._header.get("GCOUNT", 1)
self._pcount = self._header.get("PCOUNT", 0)
self._blank = None if ignore_blank else self._header.get("BLANK")
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get("BLANK")
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if "name" in kwargs and kwargs["name"]:
self.name = kwargs["name"]
if "ver" in kwargs and kwargs["ver"]:
self.ver = kwargs["ver"]
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if not do_not_scale_image_data and (self._bscale != 1 or self._bzero != 0):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get("BITPIX")
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get("BZERO")
self._bscale = self._header.get("BSCALE")
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
return the appropriate slice of the data, and loads *only* that section
into memory.
Sections are useful for retrieving a small subset of data from a remote
file that has been opened with the ``use_fsspec=True`` parameter.
For example, you can use this feature to download a small cutout from
a large FITS image hosted in the Amazon S3 cloud (see the
:ref:`astropy:fits-cloud-files` section of the Astropy
documentation for more details).
For local files, sections are mostly obsoleted by memmap support, but
should still be used to deal with very large scaled images.
Note that sections cannot currently be written to. Moreover, any
in-memory updates to the image's ``.data`` property may not be
reflected in the slices obtained via ``.section``. See the
:ref:`astropy:data-sections` section of the documentation for
more details.
"""
return Section(self)
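# Hedged usage sketch for the property above (the file name and slice bounds
# are hypothetical, and ``from astropy.io import fits`` is assumed):
#
#     >>> hdu = fits.open("large_image.fits")[0]
#     >>> cutout = hdu.section[1000:1200, 2000:2200]   # reads only this region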
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
Please remember that the order of axes on a Numpy array is the opposite
of the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if "data" in self.__dict__ and self.__dict__["data"] is not None:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_integer(self.__dict__["data"].dtype)
else:
self._data_replaced = True
was_unsigned = False
if data is not None:
if not isinstance(data, np.ndarray) and not _is_dask_array(data):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception: # pragma: no cover
raise TypeError(
f"data object {data!r} could not be coerced into an " f"ndarray"
)
if data.shape == ():
raise TypeError(
f"data object {data!r} should have at least one dimension"
)
self.__dict__["data"] = data
self._modified = True
if self.data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if data is not None and was_unsigned:
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
# will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (
self._modified
or self._header._modified
or (self._has_data and self.shape != self.data.shape)
):
# Not likely that anything needs updating
return
old_naxis = self._header.get("NAXIS", 0)
if "BITPIX" not in self._header:
bitpix_comment = self.standard_keyword_comments["BITPIX"]
else:
bitpix_comment = self._header.comments["BITPIX"]
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set("BITPIX", self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if "NAXIS" in self._header:
naxis_comment = self._header.comments["NAXIS"]
else:
naxis_comment = self.standard_keyword_comments["NAXIS"]
self._header.set("NAXIS", len(self._axes), naxis_comment, after="BITPIX")
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = "NAXIS" + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if "BLANK" in self._header:
self._blank = self._header["BLANK"]
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_pseudo_int_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1
):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (
dtype is not None
and dtype.kind == "u"
and (self._scale_back or self._scale_back is None)
):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ["BSCALE", "BZERO"]:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header["BITPIX"]
self._blank = self._header.pop("BLANK", None)
def scale(self, type=None, option="old", bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Call to this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If is `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
# Disable blank support for now
self._scale_internal(
type=type, option=option, bscale=bscale, bzero=bzero, blank=None
)
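# Hedged usage sketch for ``scale`` (values and output file name are
# illustrative only): convert floating-point data to 16-bit integers just
# before writing, letting the "minmax" option derive BSCALE/BZERO from the
# data range.
#
#     >>> hdu.scale("int16", option="minmax")
#     >>> hdu.writeto("scaled.fits", overwrite=True)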
def _scale_internal(
self, type=None, option="old", bscale=None, bzero=None, blank=0
):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (
option == "old"
and self._orig_bscale is not None
and self._orig_bzero is not None
):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax" and not issubclass(_type, np.floating):
if _is_dask_array(self.data):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0**8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0**nbytes - 2)
else:
_scale = 1
_zero = 0
# Do the scaling
if _zero != 0:
if _is_dask_array(self.data):
self.data = self.data - _zero
else:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting="unsafe")
self._header["BZERO"] = _zero
else:
try:
del self._header["BZERO"]
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header["BSCALE"] = _scale
else:
try:
del self._header["BSCALE"]
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header["BLANK"] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._blank = blank
self._header["BITPIX"] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option="warn"):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
# TODO: Once the FITSSchema framework is merged, these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank)
)
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU."
)
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(
BITPIX2DTYPE[self._orig_bitpix], blank=self._orig_blank
)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif _is_dask_array(self.data):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(
M.newbyteorder, "S"
)
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b"\0")
fileobj.flush()
if fileobj.fileobj_mode not in ("rb+", "wb+", "ab+"):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode="rb+")
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(
fp.fileno(), length=initial_position + n_bytes, access=mmap.ACCESS_WRITE
)
outarr = np.ndarray(
shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap,
)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
if bitpix == 8 and self._orig_bzero == -128:
return np.dtype("int8")
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
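# Worked examples of the conversions implemented above (assuming uint=True):
#
#     e.g. BITPIX=16, BZERO=32768, BSCALE=1  ->  numpy dtype 'uint16'
#          BITPIX=32, BZERO=0,     BSCALE=2  ->  numpy dtype 'float64'
#
# Floating-point BITPIX values fall through and return None (no change).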
def _convert_pseudo_integer(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
In this case, we don't need to handle BLANK to convert it to NaN,
since we can't do NaNs with integers anyway, i.e. the user is
responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == "u":
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
Internal function for reading image data from a file and apply scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder(">")
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and self._blank is None
):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError(
"Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False."
)
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_integer(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
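# Hedged note on the arithmetic above: stored values are mapped to physical
# values following the FITS convention ``physical = BSCALE * stored + BZERO``,
# after which any integer pixels flagged by BLANK are set to NaN.
#
#     e.g. stored = 100, BSCALE = 0.01, BZERO = 5.0  ->  physical = 6.0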
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ""
else:
format = self.data.dtype.name
format = format[format.rfind(".") + 1 :]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ""
if (
format
and not self._do_not_scale_image_data
and (self._orig_bscale != 1 or self._orig_bzero != 0)
):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f" (rescales to {new_dtype.name})"
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, "")
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_integer(self.data.dtype):
d = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"i{self.data.dtype.itemsize}",
)
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != ">":
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_integer(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
class Section:
"""
Class enabling subsets of ImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
@property
def shape(self):
# Implementing `.shape` enables `astropy.nddata.Cutout2D` to accept
# `ImageHDU.section` in place of `.data`.
return self.hdu.shape
def __getitem__(self, key):
"""Returns a slice of HDU data specified by `key`.
If the image HDU is backed by a file handle, this method will only read
the chunks of the file needed to extract `key`, which is useful in
situations where the file is located on a slow or remote file system
(e.g., cloud storage).
"""
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += (Ellipsis,)
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError("too many indices for array")
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx + 1 :]
return_0dim = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
# Note: the actual file read operations are delegated to
# `util._array_from_file` via `ImageHDU._get_scaled_image_data`
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1 :]] for k in ks]
if any(isinstance(key, slice) or isiterable(key) for key in keys[idx + 1 :]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = "PRIMARY"
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
ignore_blank=False,
uint=True,
scale_back=None,
):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
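Examples
--------
A minimal sketch of creating and writing a primary HDU; the array and
file name below are illustrative placeholders::

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> hdu = fits.PrimaryHDU(data=np.zeros((10, 10), dtype=np.int16))
    >>> hdu.writeto("new.fits")  # doctest: +SKIP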
"""
super().__init__(
data=data,
header=header,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back,
)
# Insert the EXTEND keyword
if header is None:
dim = self._header["NAXIS"]
if dim == 0:
dim = ""
self._header.set("EXTEND", True, after="NAXIS" + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False; we have to check the value
return (
card.keyword == "SIMPLE"
and ("GROUPS" not in header or header["GROUPS"] != True) # noqa: E712
and card.value
)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if "EXTEND" in self._header:
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"EXTEND", naxis + 3, lambda v: isinstance(v, bool), True, option, errs
)
return errs
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = "IMAGE"
def __init__(
self,
data=None,
header=None,
name=None,
do_not_scale_image_data=False,
uint=True,
scale_back=None,
ver=None,
):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU; it will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The version number of the HDU; it will be the value of the keyword
``EXTVER``. If not given or None, it defaults to the value of the
``EXTVER`` card of the ``header`` or 1.
(default: None)
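Examples
--------
A minimal sketch of adding an image extension to a new file; the array,
extension name, and file name below are illustrative placeholders::

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> primary = fits.PrimaryHDU()
    >>> sci = fits.ImageHDU(data=np.ones((5, 5)), name="SCI")
    >>> fits.HDUList([primary, sci]).writeto("with_extension.fits")  # doctest: +SKIP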
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data,
header=header,
name=name,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
scale_back=scale_back,
ver=ver,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _verify(self, option="warn"):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get("NAXIS", 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v == 0), 0, option, errs
)
return errs
class _IndexInfo:
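"""
Helper describing how a single index (integer, slice, or iterable) selects
points along one axis of length ``naxis``: ``npts`` is the number of points
selected, ``offset`` is the starting offset along the axis, and
``contiguous`` records whether the selection is a contiguous run.
"""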
def __init__(self, indx, naxis):
if _is_int(indx):
if indx < 0: # support negative indexing
indx = indx + naxis
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f"Index {indx} out of range.")
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f"Illegal index {indx}")
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits._tiled_compression import compress_hdu, decompress_hdu_section
from astropy.io.fits._tiled_compression.utils import _data_shape, _n_tiles, _tile_shape
from astropy.io.fits.card import Card
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.column import TDEF_RE, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.utils import lazyproperty
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.shapes import simplify_basic_index
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU
from .image import ImageHDU
from .table import BinTableHDU
# This global variable is used e.g., when calling fits.open with
# disable_image_compression which temporarily changes the global variable to
# False. This should ideally be refactored to avoid relying on global module
# variables.
COMPRESSION_ENABLED = True
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: "NO_DITHER",
SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = (
"NOCOMPRESS",
"RICE_1",
"GZIP_1",
"GZIP_2",
"PLIO_1",
"HCOMPRESS_1",
)
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}
COMPRESSION_KEYWORDS = {
"ZIMAGE",
"ZCMPTYPE",
"ZBITPIX",
"ZNAXIS",
"ZMASKCMP",
"ZSIMPLE",
"ZTENSION",
"ZEXTEND",
}
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
"SIMPLE": "ZSIMPLE",
"XTENSION": "ZTENSION",
"BITPIX": "ZBITPIX",
"NAXIS": "ZNAXIS",
"EXTEND": "ZEXTEND",
"BLOCKED": "ZBLOCKED",
"PCOUNT": "ZPCOUNT",
"GCOUNT": "ZGCOUNT",
"CHECKSUM": "ZHECKSUM",
"DATASUM": "ZDATASUM",
}
_zdef_re = re.compile(r"(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?")
_compression_keywords = set(_keyword_remaps.values()).union(
["ZIMAGE", "ZCMPTYPE", "ZMASKCMP", "ZQUANTIZ", "ZDITHER0"]
)
_indexed_compression_keywords = {"ZNAXIS", "ZTILE", "ZNAME", "ZVAL"}
# TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override any Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith(
"HIERARCH "
):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False, after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after, replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(
card, before=remapped_before, after=remapped_after, replace=replace
)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = (
"Keyword {!r} is reserved for use by the FITS Tiled Image "
"Convention and will not be stored in the header for the "
"image being compressed.".format(keyword)
)
if keyword == "TFIELDS":
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group("label").upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group("label").upper()
num = m.group("num")
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
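# For example (illustrative): 'NAXIS2' -> 'ZNAXIS2' and 'BITPIX' ->
# 'ZBITPIX', while a keyword with no mapping such as 'OBJECT' is
# returned unchanged.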
is_naxisn = False
if keyword[:5] == "NAXIS":
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f"ZNAXIS{index}"
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword, repeat))
return idx
def clear(self):
"""
Remove all cards from the header.
"""
self._table_header.clear()
super().clear()
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_load_variable_length_data = False
"""
We don't want to always load all the tiles, so by setting this option
we can access the tiles as needed.
"""
_default_name = "COMPRESSED_IMAGE"
@deprecated_renamed_argument(
"tile_size",
None,
since="5.3",
message="The tile_size argument has been deprecated. Use tile_shape "
"instead, but note that this should be given in the reverse "
"order to tile_size (tile_shape should be in Numpy C order).",
)
def __init__(
self,
data=None,
header=None,
name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_shape=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False,
scale_back=False,
tile_size=None,
):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``, ``'NOCOMPRESS'``
tile_shape : tuple, optional
Compression tile shape, which should be specified using the default
Numpy convention for array shapes (C order). The default is to
treat each row of image as a tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. Sections of the image can be
extracted and decompressed individually via the ``section``
attribute, without decompressing the entire image.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_shape`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_shape`` value of ``(100,100)`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values; however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
the negative of desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` mean that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The first method, specified with the constant
``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
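Examples
--------
A minimal sketch of creating a tile-compressed image extension; the
array and file name below are illustrative placeholders::

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> data = np.arange(10000, dtype=np.float32).reshape(100, 100)
    >>> hdu = fits.CompImageHDU(data, compression_type="RICE_1",
    ...                         quantize_level=16.0)
    >>> hdu.writeto("compressed.fits")  # doctest: +SKIP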
"""
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
if tile_shape is None and tile_size is not None:
tile_shape = tuple(tile_size[::-1])
elif tile_shape is not None and tile_size is not None:
raise ValueError(
"Cannot specify both tile_size and tile_shape. "
"Note that tile_size is deprecated and tile_shape "
"alone should be used."
)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(
header,
name,
compression_type=compression_type,
tile_shape=tile_shape,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed,
)
# TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [
self._header.get("ZNAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("ZNAXIS", 0))
]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._bitpix = self._header["ZBITPIX"]
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (e.g. from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if "EXTNAME" in header:
indices = header._keyword_indices["EXTNAME"]
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [
index for index in indices if header[index] == self._default_name
]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self.header:
self.header["EXTNAME"] = value
else:
self.header["EXTNAME"] = (value, "extension name")
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != "XTENSION":
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ("BINTABLE", "A3DTABLE"):
return False
if "ZIMAGE" not in header or not header["ZIMAGE"]:
return False
return COMPRESSION_ENABLED
def _update_header_data(
self,
image_header,
name=None,
compression_type=None,
tile_shape=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None,
):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1', 'NOCOMPRESS'; if this value is `None`, use value
already in the header; if no value already in the header, use
'RICE_1'
tile_shape : tuple of int, optional
compression tile shape (in C order); if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 1
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array for which the binary table representing the compressed data
# ends up larger than 4 GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
if self._has_data:
huge_hdu = self.data.nbytes > 2**32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and "EXTNAME" not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set(
"EXTNAME",
self._default_name,
"name of this binary table extension",
after="TFIELDS",
)
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
"Unknown compression type provided (supported are {}). "
"Default ({}) compression will be used.".format(
", ".join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE,
),
AstropyUserWarning,
)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set(
"ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS"
)
else:
compression_type = self.compression_type
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get("BZERO", 0.0)
bscale = image_header.get("BSCALE", 1.0)
after_keyword = "EXTNAME"
if bscale != 1.0:
self._header.set("BSCALE", bscale, after=after_keyword)
after_keyword = "BSCALE"
if bzero != 0.0:
self._header.set("BZERO", bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments["BITPIX"]
except (AttributeError, KeyError):
bitpix_comment = "data type of original image"
try:
naxis_comment = image_header.comments["NAXIS"]
except (AttributeError, KeyError):
naxis_comment = "dimension of original image"
# Set the label for the first column in the table
self._header.set(
"TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS"
)
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == "PLIO_1":
tform1 = "1QI" if huge_hdu else "1PI"
else:
tform1 = "1QB" if huge_hdu else "1PB"
self._header.set(
"TFORM1",
tform1,
"data format of field: variable length array",
after="TTYPE1",
)
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header["TTYPE1"], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header["BITPIX"]
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
# CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA column
# to store floating point data that couldn't be quantized, instead
# of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
ttype2 = "GZIP_COMPRESSED_DATA"
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = "1QB" if huge_hdu else "1PB"
# Set up the second column for the table that will hold any
# uncompressible data.
self._header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1")
self._header.set(
"TFORM2",
tform2,
"data format of field: variable length array",
after="TTYPE2",
)
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2")
self._header.set(
"TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3"
)
col3 = Column(name=self._header["TTYPE3"], format=self._header["TFORM3"])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3")
self._header.set(
"TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4"
)
after = "TFORM4"
col4 = Column(name=self._header["TTYPE4"], format=self._header["TFORM4"])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = "TFORM1"
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ["TTYPE2", "TFORM2", "TTYPE3", "TFORM3", "TTYPE4", "TFORM4"]
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes")
self._header.set(
"TFIELDS", ncols, "number of fields in each row", after="GCOUNT"
)
self._header.set(
"ZIMAGE", True, "extension contains compressed image", after=after
)
self._header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE")
self._header.set(
"ZNAXIS", self._image_header["NAXIS"], naxis_comment, after="ZBITPIX"
)
# Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header["ZNAXIS" + str(idx)]
del self._header["ZTILE" + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header["NAXIS"]
if not tile_shape:
tile_shape = []
elif len(tile_shape) != naxis:
warnings.warn(
"Provided tile size not appropriate for the data. "
"Default tile size will be used.",
AstropyUserWarning,
)
tile_shape = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == "HCOMPRESS_1":
if self._image_header["NAXIS1"] < 4 or self._image_header["NAXIS2"] < 4:
raise ValueError("Hcompress minimum image dimension is 4 pixels")
elif tile_shape:
if tile_shape[-1] < 4 or tile_shape[-2] < 4:
# user specified tile size is too small
raise ValueError("Hcompress minimum tile dimension is 4 pixels")
major_dims = len([ts for ts in tile_shape if ts > 1])
if major_dims > 2:
raise ValueError(
"HCOMPRESS can only support 2-dimensional tile sizes."
"All but two of the tile_shape dimensions must be set "
"to 1."
)
if tile_shape and (tile_shape[-1] == 0 and tile_shape[-2] == 0):
# compress the whole image as a single tile
tile_shape[-1] = self._image_header["NAXIS1"]
tile_shape[-2] = self._image_header["NAXIS2"]
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_shape[i] = 1
elif not tile_shape:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
# efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_shape = [self._image_header["NAXIS1"]]
if self._image_header["NAXIS2"] <= 30:
tile_shape.insert(0, self._image_header["NAXIS2"])  # whole image as one tile
else:
# look for another good tile dimension
naxis2 = self._image_header["NAXIS2"]
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_shape.insert(0, dim)
break
else:
tile_shape.insert(0, 17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_shape.insert(0, 1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header["NAXIS1"] % tile_shape[-1] # 1st dimen
original_tile_shape = tile_shape[:]
if remain > 0 and remain < 4:
tile_shape[-1] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS1"] % tile_shape[-1]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 1st dimension has less than 4 pixels"
)
remain = self._image_header["NAXIS2"] % tile_shape[-2] # 2nd dimen
if remain > 0 and remain < 4:
tile_shape[-2] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS2"] % tile_shape[-2]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 2nd dimension has less than 4 pixels"
)
if tile_shape != original_tile_shape:
warnings.warn(
f"The tile shape should be such that no tiles have "
f"fewer than 4 pixels. The tile shape has "
f"automatically been changed from {original_tile_shape} "
f"to {tile_shape}, but in future this will raise an "
f"error and the correct tile shape should be specified "
f"directly.",
AstropyDeprecationWarning,
)
# Set up locations for writing the next cards in the header.
last_znaxis = "ZNAXIS"
if self._image_header["NAXIS"] > 0:
after1 = "ZNAXIS1"
else:
after1 = "ZNAXIS"
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 0
for idx, axis in enumerate(self._axes):
naxis = "NAXIS" + str(idx + 1)
znaxis = "ZNAXIS" + str(idx + 1)
ztile = "ZTILE" + str(idx + 1)
if tile_shape and len(tile_shape) >= idx + 1:
ts = tile_shape[len(self._axes) - 1 - idx]
else:
if ztile not in self._header:
# Default tile size
if not idx:
ts = self._image_header["NAXIS1"]
else:
ts = 1
else:
ts = self._header[ztile]
tile_shape.insert(0, ts)
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= (axis - 1) // ts + 1
if image_header and naxis in image_header:
self._header.set(
znaxis, axis, image_header.comments[naxis], after=last_znaxis
)
else:
self._header.set(
znaxis, axis, "length of original image axis", after=last_znaxis
)
self._header.set(ztile, ts, "size of tiles to be compressed", after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set("NAXIS2", nrows, "number of rows in table")
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
if self._header[zname] == "NOISEBIT":
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == "SCALE ":
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == "SMOOTH ":
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = "ZCMPTYPE"
idx = 1
if compression_type == "RICE_1":
self._header.set(
"ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword
)
self._header.set(
"ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1"
)
self._header.set(
"ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1"
)
if self._header["ZBITPIX"] == 8:
bytepix = 1
elif self._header["ZBITPIX"] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set(
"ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
elif compression_type == "HCOMPRESS_1":
self._header.set(
"ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword
)
self._header.set(
"ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1"
)
self._header.set(
"ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1"
)
self._header.set(
"ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
if self._image_header["BITPIX"] < 0: # floating point image
self._header.set(
"ZNAME" + str(idx),
"NOISEBIT",
"floating point quantization level",
after=after_keyword,
)
self._header.set(
"ZVAL" + str(idx),
quantize_level,
"floating point quantization level",
after="ZNAME" + str(idx),
)
# Add the dither method and seed
if quantize_method:
if quantize_method not in [
NO_DITHER,
SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2,
]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn(
"Unknown quantization method provided. "
"Default method ({}) used.".format(name)
)
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = "No dithering during quantization"
else:
zquantiz_comment = "Pixel Quantization Algorithm"
self._header.set(
"ZQUANTIZ",
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after="ZVAL" + str(idx),
)
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get("ZQUANTIZ", NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if "ZDITHER0" in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header["ZDITHER0"]
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif "ZDITHER0" in self._header:
dither_seed = self._header["ZDITHER0"]
else:
dither_seed = self._generate_dither_seed(DEFAULT_DITHER_SEED)
self._header.set(
"ZDITHER0",
dither_seed,
"dithering offset when quantizing floats",
after="ZQUANTIZ",
)
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if "SIMPLE" in image_header:
self._header.set(
"ZSIMPLE",
image_header["SIMPLE"],
image_header.comments["SIMPLE"],
before="ZBITPIX",
)
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if "EXTEND" in image_header:
self._header.set(
"ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"]
)
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if "BLOCKED" in image_header:
self._header.set(
"ZBLOCKED",
image_header["BLOCKED"],
image_header.comments["BLOCKED"],
)
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in image_header:
self._header.set(
"ZTENSION",
"IMAGE",
image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in image_header:
self._header.set(
"ZPCOUNT",
image_header["PCOUNT"],
image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in image_header:
self._header.set(
"ZGCOUNT",
image_header["GCOUNT"],
image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# Move CHECKSUM and DATASUM cards from the image header to the
# table header as XHECKSUM and XDATASUM cards.
if "CHECKSUM" in image_header:
self._header.set(
"ZHECKSUM",
image_header["CHECKSUM"],
image_header.comments["CHECKSUM"],
)
if "DATASUM" in image_header:
self._header.set(
"ZDATASUM",
image_header["DATASUM"],
image_header.comments["DATASUM"],
)
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in self._image_header:
self._header.set(
"ZTENSION",
"IMAGE",
self._image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in self._image_header:
self._header.set(
"ZPCOUNT",
self._image_header["PCOUNT"],
self._image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in self._image_header:
self._header.set(
"ZGCOUNT",
self._image_header["GCOUNT"],
self._image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if "ZHECKSUM" in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
def _scale_data(self, data):
if self._orig_bzero != 0 or self._orig_bscale != 1:
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
if "BLANK" in self._header:
blanks = data == np.array(self._header["BLANK"], dtype="int32")
else:
blanks = None
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._orig_bzero, out=data, casting="unsafe")
if blanks is not None:
data = np.where(blanks, np.nan, data)
return data
@lazyproperty
def data(self):
"""
The decompressed data array.
Note that accessing this will cause all the tiles to be loaded,
decompressed, and combined into a single data array. If you do
not need to access the whole array, consider instead using the
:attr:`~astropy.io.fits.CompImageHDU.section` property.
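For example (illustrative), ``hdu.section[0:64, 0:64]`` decompresses only
the tiles that overlap that region, whereas accessing ``hdu.data``
decompresses the full image.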
"""
if len(self.compressed_data) == 0:
return None
# Since .section has general code to load any arbitrary part of the
# data, we can just use this - and the @lazyproperty on the current
# property will ensure that we do this only once.
data = self.section[...]
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (
not isinstance(data, np.ndarray) or data.dtype.fields is not None
):
raise TypeError(
"CompImageHDU data has incorrect type:{}; dtype.fields = {}".format(
type(data), data.dtype.fields
)
)
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__["data"]
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if "compressed_data" in self.__dict__:
del self.__dict__["compressed_data"]._coldefs
# Now go ahead and delete from self.__dict__; normally
# lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__["compressed_data"]
# If this file was mmap'd, numpy.memmap will hold open a file
# handle until the underlying mmap object is garbage-collected;
# since this reference leak can sometimes hang around longer than
# welcome go ahead and force a garbage collection
gc.collect()
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, "_image_header"):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
# (Note: a set is used here instead of a list in case there are any
# duplicate keywords, which there may be in some pathological cases; see
# https://github.com/astropy/astropy/issues/2750.)
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
hcomments = self._header.comments
if "ZSIMPLE" in self._header:
image_header.set(
"SIMPLE", self._header["ZSIMPLE"], hcomments["ZSIMPLE"], before=0
)
del image_header["XTENSION"]
elif "ZTENSION" in self._header:
if self._header["ZTENSION"] != "IMAGE":
warnings.warn(
"ZTENSION keyword in compressed extension != 'IMAGE'",
AstropyUserWarning,
)
image_header.set("XTENSION", "IMAGE", hcomments["ZTENSION"], before=0)
else:
image_header.set("XTENSION", "IMAGE", before=0)
image_header.set(
"BITPIX", self._header["ZBITPIX"], hcomments["ZBITPIX"], before=1
)
image_header.set("NAXIS", self._header["ZNAXIS"], hcomments["ZNAXIS"], before=2)
last_naxis = "NAXIS"
for idx in range(image_header["NAXIS"]):
znaxis = "ZNAXIS" + str(idx + 1)
naxis = znaxis[1:]
image_header.set(
naxis, self._header[znaxis], hcomments[znaxis], after=last_naxis
)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header["NAXIS"]
for keyword in list(image_header["NAXIS?*"]):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if "ZPCOUNT" in self._header:
image_header.set(
"PCOUNT",
self._header["ZPCOUNT"],
hcomments["ZPCOUNT"],
after=last_naxis,
)
else:
image_header.set("PCOUNT", 0, after=last_naxis)
if "ZGCOUNT" in self._header:
image_header.set(
"GCOUNT", self._header["ZGCOUNT"], hcomments["ZGCOUNT"], after="PCOUNT"
)
else:
image_header.set("GCOUNT", 1, after="PCOUNT")
if "ZEXTEND" in self._header:
image_header.set("EXTEND", self._header["ZEXTEND"], hcomments["ZEXTEND"])
if "ZBLOCKED" in self._header:
image_header.set("BLOCKED", self._header["ZBLOCKED"], hcomments["ZBLOCKED"])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if "ZHECKSUM" in self._header:
image_header.set(
"CHECKSUM", self._header["ZHECKSUM"], hcomments["ZHECKSUM"]
)
if "ZDATASUM" in self._header:
image_header.set("DATASUM", self._header["ZDATASUM"], hcomments["ZDATASUM"])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if "EXTNAME" in image_header and image_header["EXTNAME"] == self._default_name:
del image_header["EXTNAME"]
# Remove the PCOUNT GCOUNT cards if the uncompressed header is
# from a primary HDU
if "SIMPLE" in image_header:
del image_header["PCOUNT"]
del image_header["GCOUNT"]
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ""
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind(".") + 1 :]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header["NAXIS"]):
_shape += (self.header["NAXIS" + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header["BITPIX"]]
return (self.name, self.ver, class_name, len(self.header), _shape, _format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"=i{self.data.dtype.itemsize}",
)
try:
nrows = self._header["NAXIS2"]
tbsize = self._header["NAXIS1"] * nrows
self._header["PCOUNT"] = 0
if "THEAP" in self._header:
del self._header["THEAP"]
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Compress the data.
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compress_hdu(self)
finally:
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder(">")
buf = self.compressed_data
compressed_data = buf[: self._theap].view(dtype=dtype, type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option="old", bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy dtype
name (e.g. ``'uint8'``, ``'int16'``, ``'float32'``, etc.). If
`None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
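Examples
--------
A minimal sketch of the common case of scaling to 16-bit integers
before writing (the output file name is illustrative)::

    >>> hdu.scale('int16', bzero=32768)             # doctest: +SKIP
    >>> hdu.writeto('scaled.fits', overwrite=True)  # doctest: +SKIP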
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale != 1 or bzero != 0:
_scale = bscale
_zero = bzero
else:
if option == "old":
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax":
if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2.0**8 - 1)
else:
_zero = (_max + _min) / 2.0
# throw away -2^N
_scale = (_max - _min) / (2.0 ** (8 * np.dtype(_type).itemsize) - 2)
# Do the scaling
if _zero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data -= _zero, and we
# do this instead of self.data = self.data - _zero to
# avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting="unsafe")
self.header["BZERO"] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header["BZERO"]
if _scale != 1:
self.data /= _scale
self.header["BSCALE"] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header["BSCALE"]
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get("BZERO", 0)
self._bscale = self.header.get("BSCALE", 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header["BITPIX"] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if "CHECKSUM" in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set(
"CHECKSUM",
image_hdu.header["CHECKSUM"],
image_hdu.header.comments["CHECKSUM"],
)
if "DATASUM" in image_hdu.header:
self._image_header.set(
"DATASUM",
image_hdu.header["DATASUM"],
image_hdu.header.comments["DATASUM"],
)
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__["data"] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, "_imagedata"):
self.__dict__["data"] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (
closed
and self._data_loaded
and _get_array_mmap(self.compressed_data) is not None
):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _update_header_scale_info(self, dtype=None):
if not self._do_not_scale_image_data and not (
self._orig_bzero == 0 and self._orig_bscale == 1
):
for keyword in ["BSCALE", "BZERO"]:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header["BITPIX"]
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed)
)
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
tile_dims = self.tile_shape
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype="uint8").sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
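# For instance, a byte sum of 123456789 (an illustrative value) would
# yield a seed of (123456789 % 10000) + 1 == 6790.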
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return (
(sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000
) + 1
else:
return seed
@property
def section(self):
"""
Efficiently access a section of the image array
This property can be used to access a section of the data without
loading and decompressing the entire array into memory.
The :class:`~astropy.io.fits.CompImageSection` object returned by this
attribute is not meant to be used directly by itself. Rather, slices of
the section return the appropriate slice of the data, and loads *only*
that section into memory. Any valid basic Numpy index can be used to
slice :class:`~astropy.io.fits.CompImageSection`.
Note that accessing data using :attr:`CompImageHDU.section` will always
load tiles one at a time from disk, and therefore when accessing a large
fraction of the data (or slicing it in a way that would cause most tiles
to be loaded) you may obtain better performance by using
:attr:`CompImageHDU.data`.
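Examples
--------
A hedged sketch (``compressed.fits`` is a placeholder file name)::

    >>> from astropy.io import fits
    >>> with fits.open('compressed.fits') as hdul:        # doctest: +SKIP
    ...     cutout = hdul[1].section[100:200, 100:200]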
"""
return CompImageSection(self)
@property
def tile_shape(self):
"""
The tile shape used for the tiled compression.
This shape is given in Numpy/C order.
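For example, a compressed HDU with ``ZTILE1 = 100`` and ``ZTILE2 = 50``
(illustrative values) has a tile shape of ``(50, 100)``.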
"""
return tuple(
[
self._header[f"ZTILE{idx + 1}"]
for idx in range(self._header["ZNAXIS"] - 1, -1, -1)
]
)
@property
def compression_type(self):
"""
The name of the compression algorithm.
"""
return self._header.get("ZCMPTYPE", DEFAULT_COMPRESSION_TYPE)
class CompImageSection:
"""
Class enabling subsets of CompImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
self._data_shape = _data_shape(self.hdu._header)
self._tile_shape = _tile_shape(self.hdu._header)
self._n_dim = len(self._data_shape)
self._n_tiles = np.array(
_n_tiles(self._data_shape, self._tile_shape), dtype=int
)
@property
def shape(self):
return tuple(self._data_shape)
@property
def ndim(self):
return self.hdu._header["ZNAXIS"]
@property
def dtype(self):
return BITPIX2DTYPE[self.hdu._header["ZBITPIX"]]
def __getitem__(self, index):
# Shortcut if the whole data is requested (this is used by the
# data property, so we optimize it as it is frequently used)
if index is Ellipsis:
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = self._n_tiles - 1
data = decompress_hdu_section(self.hdu, first_tile_index, last_tile_index)
return self.hdu._scale_data(data)
index = simplify_basic_index(index, shape=self._data_shape)
# Determine for each dimension the first and last tile to extract
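# Worked example (illustrative numbers): with a tile length of 64 along a
# dimension, the slice 100:200 with step 1 touches tiles 100 // 64 == 1
# through (200 - 1) // 64 == 3, and the requested slice is then shifted
# by 1 * 64 so that it indexes into the decompressed sub-array.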
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = np.zeros(self._n_dim, dtype=int)
final_array_index = []
for dim, idx in enumerate(index):
if isinstance(idx, slice):
if idx.step > 0:
first_tile_index[dim] = idx.start // self._tile_shape[dim]
last_tile_index[dim] = (idx.stop - 1) // self._tile_shape[dim]
else:
stop = 0 if idx.stop is None else max(idx.stop - 1, 0)
first_tile_index[dim] = stop // self._tile_shape[dim]
last_tile_index[dim] = idx.start // self._tile_shape[dim]
# Because slices such as slice(5, 0, 1) can exist (which
# would be empty) we need to make sure last_tile_index is
# never smaller than first_tile_index
last_tile_index = np.maximum(last_tile_index, first_tile_index)
if idx.step < 0 and idx.stop is None:
final_array_index.append(idx)
else:
final_array_index.append(
slice(
idx.start - self._tile_shape[dim] * first_tile_index[dim],
idx.stop - self._tile_shape[dim] * first_tile_index[dim],
idx.step,
)
)
else:
first_tile_index[dim] = idx // self._tile_shape[dim]
last_tile_index[dim] = first_tile_index[dim]
final_array_index.append(
idx - self._tile_shape[dim] * first_tile_index[dim]
)
data = decompress_hdu_section(self.hdu, first_tile_index, last_tile_index)
return self.hdu._scale_data(data[tuple(final_array_index)])
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import numbers
import os
import sys
import warnings
from contextlib import suppress
from inspect import Parameter, signature
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits.file import _File
from astropy.io.fits.header import Header, _BasicHeader, _DelayedHeader, _pad_length
from astropy.io.fits.util import (
_extract_number,
_free_space_check,
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
decode_ascii,
first,
itersubclasses,
)
from astropy.io.fits.verify import _ErrList, _Verify
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = [
"DELAYED",
# classes
"InvalidHDUException",
"ExtensionHDU",
"NonstandardExtHDU",
]
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {
8: "uint8",
16: "int16",
32: "int32",
64: "int64",
-32: "float32",
-64: "float64",
}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {
"int8": 8,
"uint8": 8,
"int16": 16,
"uint16": 16,
"int32": 32,
"uint32": 32,
"int64": 64,
"uint64": 64,
"float32": -32,
"float64": -64,
}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
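For example, with only the built-in HDU classes registered, a header whose
first card is ``XTENSION = 'BINTABLE'`` but which also contains
``ZIMAGE = T`` resolves to `CompImageHDU` rather than plain `BinTableHDU`,
because the more specific subclass is tried first.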
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (
c.__module__.startswith("astropy.io.fits.")
or c in cls._hdu_registry
):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
"An exception occurred matching an HDU header to the "
"appropriate HDU type: {}".format(exc),
AstropyUserWarning,
)
warnings.warn(
"The HDU will be treated as corrupted.", AstropyUserWarning
)
klass = _CorruptedHDU
del exc
break
return klass
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU:
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = "\x00"
_default_name = ""
# _header uses a descriptor to delay the loading of the fits.Header object
# until it is necessary.
_header = _DelayedHeader()
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._header_str = None
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if "DATASUM" in self._header and "CHECKSUM" not in self._header:
self._output_checksum = "datasum"
elif "CHECKSUM" in self._header:
self._output_checksum = True
def __init_subclass__(cls, **kwargs):
# Add the same data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes.
data_prop = cls.__dict__.get("data", None)
if isinstance(data_prop, (lazyproperty, property)) and data_prop.fdel is None:
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
# sys.getrefcount is CPython specific and not on PyPy.
has_getrefcount = hasattr(sys, "getrefcount")
if has_getrefcount:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__["data"]
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if has_getrefcount and data_refcount == 2:
self._file._maybe_close_mmap()
cls.data = data_prop.deleter(data)
return super().__init_subclass__(**kwargs)
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self._header:
self._header["EXTNAME"] = value
else:
self._header["EXTNAME"] = (value, "extension name")
@property
def ver(self):
return self._header.get("EXTVER", 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if "EXTVER" in self._header:
self._header["EXTVER"] = value
else:
self._header["EXTVER"] = (value, "extension value")
@property
def level(self):
return self._header.get("EXTLEVEL", 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if "EXTLEVEL" in self._header:
self._header["EXTLEVEL"] = value
else:
self._header["EXTLEVEL"] = (value, "extension level")
@property
def is_image(self):
return self.name == "PRIMARY" or (
"XTENSION" in self._header
and (
self._header["XTENSION"] == "IMAGE"
or (
self._header["XTENSION"] == "BINTABLE"
and "ZIMAGE" in self._header
and self._header["ZIMAGE"] is True
)
)
)
@property
def _data_loaded(self):
return "data" in self.__dict__ and self.data is not DELAYED
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False, **kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
Ignore a missing end card in the header data. Note that without the
end card the end of the header may be ambiguous and result in a
corrupt HDU. In this case the assumption is that the first 2880
block that does not begin with valid FITS header data is the
beginning of the data.
**kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
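Examples
--------
A minimal sketch, serializing an HDU to bytes in memory and parsing it
back (a hedged illustration rather than an exhaustive round trip)::

    >>> import io
    >>> import numpy as np
    >>> from astropy.io import fits
    >>> buf = io.BytesIO()
    >>> fits.HDUList([fits.PrimaryHDU(data=np.arange(10.0))]).writeto(buf)
    >>> hdu = fits.PrimaryHDU.fromstring(buf.getvalue())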
"""
return cls._readfrom_internal(
data, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs
)
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False, **kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file-like
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(
fileobj, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs
)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
def writeto(self, name, output_verify="exception", overwrite=False, checksum=False):
"""
Write the HDU to a new file. This is a convenience method to
provide a user easier output interface if only one HDU needs
to be written to a file.
Parameters
----------
name : path-like or file-like
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
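Examples
--------
A minimal sketch (the output file name is illustrative)::

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> hdu = fits.PrimaryHDU(data=np.arange(100.0))
    >>> hdu.writeto('single_hdu.fits', overwrite=True)   # doctest: +SKIP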
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum)
@classmethod
def _from_data(cls, data, header, **kwargs):
"""
Instantiate the HDU object after guessing the HDU class from the
FITS Header.
"""
klass = _hdu_class_from_header(cls, header)
return klass(data=data, header=header, **kwargs)
@classmethod
def _readfrom_internal(
cls, data, header=None, checksum=False, ignore_missing_end=False, **kwargs
):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
try:
# First we try to read the header with the fast parser
# from _BasicHeader, which will read only the standard
# 8 character keywords to get the structural keywords
# that are needed to build the HDU object.
header_str, header = _BasicHeader.fromfile(data)
except Exception:
# If the fast header parsing failed, then fallback to
# the classic Header parser, which has better support
# and reporting for the various issues that can be found
# in the wild.
data.seek(header_offset)
header = Header.fromfile(data, endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype="ubyte", buffer=data)
except TypeError:
raise TypeError(
f"The provided object {data!r} does not contain an underlying "
"memory buffer. fromstring() requires an object that "
"supports the buffer interface such as bytes, buffer, "
"memoryview, ndarray, etc. This restriction is to ensure "
"that efficient access to the array/table data is possible."
)
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx : idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, "", not ignore_missing_end, True
)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
# __init__ does not accept arbitrary keyword arguments (**kwargs), so we
# need to delete any keyword arguments that are not in its signature;
# otherwise we could simply pass everything through.
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
try:
hdu = cls(data=DELAYED, header=header, **new_kwargs)
except TypeError:
# This may happen because some HDU class (e.g. GroupsHDU) wants
# to set a keyword on the header, which is not possible with the
# _BasicHeader. While HDU classes should not need to modify the
# header in general, sometimes this is needed to fix it. So in
# this case we build a full Header and try again to create the
# HDU object.
if isinstance(header, _BasicHeader):
header = Header.fromstring(header_str)
hdu = cls(data=DELAYED, header=header, **new_kwargs)
else:
raise
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
if isinstance(hdu._header, _BasicHeader):
# Delete the temporary _BasicHeader.
# We need to do this before an eventual checksum computation,
# since it needs to modify temporarily the header
#
# The header string is stored in the HDU._header_str attribute,
# so that it can be used directly when we need to create the
# classic Header object, without having to parse again the file.
del hdu._header
hdu._header_str = header_str
# Checksums are not checked on invalid HDU types
if checksum and checksum != "remove" and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, numbers.Integral):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer, offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_pseudo_int_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_pseudo_int_scale_keywords(self):
"""
If the data is signed int 8, unsigned int 16, 32, or 64,
add BSCALE/BZERO cards to header.
"""
if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if "TFIELDS" in self._header:
self._header.set("BSCALE", 1, after="TFIELDS")
elif "GCOUNT" in self._header:
self._header.set("BSCALE", 1, after="GCOUNT")
else:
self._header.set("BSCALE", 1)
self._header.set("BZERO", _pseudo_zero(self.data.dtype), after="BSCALE")
def _update_checksum(
self, checksum, checksum_keyword="CHECKSUM", datasum_keyword="DATASUM"
):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
and ``datasum_keyword`` arguments--see for example ``CompImageHDU``
for an example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == "remove":
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (
modified
or self._new
or (
checksum
and (
"CHECKSUM" not in self._header
or "DATASUM" not in self._header
or not self._checksum_valid
or not self._datasum_valid
)
)
):
if checksum == "datasum":
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(
checksum_keyword=checksum_keyword, datasum_keyword=datasum_keyword
)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype):
for keyword in ("BSCALE", "BZERO"):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, OSError):
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
# to avoid a bug in the lustre filesystem client, don't
# write zero-byte objects
if size > 0 and _pad_length(size) > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode("ascii"))
size += len(padding)
else:
# The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, "ubyte", self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
# HDUList, eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
# Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
# no other references to it; if there are other references then
# it is up to the user to clean those up)
if closed and self._data_loaded and _get_array_mmap(self.data) is not None:
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, "CorruptedHDU")
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == "SIMPLE":
if "GROUPS" not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
Differs from the base class :class:`_writedata` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, "NonstandardHDU", len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, "ubyte", self._data_offset)
def _verify(self, option="warn"):
errs = _ErrList([], unit="Card")
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
if header is not None and not isinstance(header, (Header, _BasicHeader)):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError("header must be a Header object")
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ("SIMPLE", "XTENSION")
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
return self._header.data_size
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
"""
if hasattr(self, "_file") and self._file:
return {
"file": self._file,
"filemode": self._file.mode,
"hdrLoc": self._header_offset,
"datLoc": self._data_offset,
"datSpan": self._data_size,
}
else:
return None
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option="warn"):
errs = _ErrList([], unit="Card")
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case of required cards in wrong order.
if isinstance(self, ExtensionHDU):
firstkey = "XTENSION"
firstval = self._extension
else:
firstkey = "SIMPLE"
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards(
"BITPIX", 1, lambda v: (_is_int(v) and is_valid(v)), 8, option, errs
)
self.req_cards(
"NAXIS", 2, lambda v: (_is_int(v) and 0 <= v <= 999), 0, option, errs
)
naxis = self._header.get("NAXIS", 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = "NAXIS" + str(ax - 2)
self.req_cards(
key,
ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option,
errs,
)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith("NAXIS") and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = (
"NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis)
)
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(
option=option,
err_text=err_text,
fix=fix,
fix_text="Deleted.",
)
)
# Verify that the EXTNAME keyword exists and is a string
if "EXTNAME" in self._header:
if not isinstance(self._header["EXTNAME"], str):
err_text = "The EXTNAME keyword must have a string value."
fix_text = "Converted the EXTNAME keyword to a string value."
def fix(header=self._header):
header["EXTNAME"] = str(header["EXTNAME"])
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
A valid value for a FITS keyword to use if the given ``test``
fails to replace an invalid value. In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
when created. Also check the card's value by using the ``test``
argument.
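Examples
--------
A hedged sketch mirroring how ``_verify`` calls this method internally
(``hdu`` and ``errs`` are assumed to already exist)::

    >>> hdu.req_cards('NAXIS', 2,
    ...               lambda v: (_is_int(v) and 0 <= v <= 999),
    ...               0, 'warn', errs)   # doctest: +SKIP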
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = f"'{keyword}' card does not exist."
fix_text = f"Fixed by inserting a new '{keyword}' card."
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(
self.run_option(
option,
err_text=err_text,
fix_text=fix_text,
fix=fix,
fixable=fixable,
)
)
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = f"'{keyword}' card at the wrong place (card {index})."
fix_text = (
f"Fixed by moving it to the right place (card {insert_pos})."
)
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = f"'{keyword}' card has invalid value '{val}'."
fix_text = f"Fixed by setting a new value '{fix_value}'."
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(
self.run_option(
option,
err_text=err_text,
fix_text=fix_text,
fix=fix,
fixable=fixable,
)
)
return errs
def add_datasum(self, when=None, datasum_keyword="DATASUM"):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
For testing purposes, provide a ``when`` argument to enable the comment
value in the card to remain consistent. This will enable the
generation of a ``CHECKSUM`` card with a consistent value.
"""
cs = self._calculate_datasum()
if when is None:
when = f"data unit checksum updated {self._get_timestamp()}"
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(
self,
when=None,
override_datasum=False,
checksum_keyword="CHECKSUM",
datasum_keyword="DATASUM",
):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
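        Examples
        --------
        A minimal, illustrative sketch (``hdu`` is assumed to be any HDU
        instance); the resulting card values depend on the header and data::
            >>> hdu.add_checksum()                                     # doctest: +SKIP
            >>> 'CHECKSUM' in hdu.header and 'DATASUM' in hdu.header   # doctest: +SKIP
            True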
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = f"HDU checksum updated {self._get_timestamp()}"
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, "0" * 16, when, before=datasum_keyword)
else:
self._header.set(checksum_keyword, "0" * 16, when)
csum = self._calculate_checksum(data_cs, checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if "DATASUM" in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header["DATASUM"]):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
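        Examples
        --------
        A minimal, illustrative sketch (``hdu`` is assumed to be an HDU whose
        ``CHECKSUM`` card was just written, e.g. by `add_checksum`)::
            >>> hdu.add_checksum()     # doctest: +SKIP
            >>> hdu.verify_checksum()  # doctest: +SKIP
            1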
"""
if "CHECKSUM" in self._header:
if "DATASUM" in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header["CHECKSUM"]:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
Simply displays warnings if either the checksum or datasum don't match.
"""
if "CHECKSUM" in self._header:
self._checksum = self._header["CHECKSUM"]
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
f"Checksum verification failed for HDU {self.name, self.ver}.\n",
AstropyUserWarning,
)
if "DATASUM" in self._header:
self._datasum = self._header["DATASUM"]
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
f"Datasum verification failed for HDU {self.name, self.ver}.\n",
AstropyUserWarning,
)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(
self._data_size, "ubyte", self._data_offset
)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view("ubyte"))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword="CHECKSUM"):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = "0" * 16
# Convert the header to bytes.
s = self._header.tostring().encode("utf8")
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype="ubyte"), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
            length = min(blocklen, len(data) - i)  # final block may be shorter than 2880 bytes
sum32 = self._compute_hdu_checksum(data[i : i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
        This code should only be called with blocks of 2880 bytes.
        Longer blocks result in non-standard checksums with carry overflow.
        Historically, this code *was* called with larger blocks and for that
        reason it still needs to handle them for backward compatibility.
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view(">u2")
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
hicarry = hi >> u16
locarry = lo >> u16
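        # Fold the carry bits from each 16-bit half back into the other half
        # and repeat until no carries remain; this wrap-around ("end-around
        # carry") step is what makes the sum a ones' complement addition.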
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF]
_EXCLUDE = [0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60] # fmt: skip
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord("0")
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient], dtype="int32"
)
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype="byte")
ascii = np.zeros((16,), dtype="byte")
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tobytes())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ""
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
def writeto(self, name, output_verify="exception", overwrite=False, checksum=False):
"""
        Works similarly to the normal ``writeto()``, but prepends a default
        `PrimaryHDU`, as required by extension HDUs (which cannot stand on
        their own).
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v >= 0), 0, option, errs
)
self.req_cards(
"GCOUNT", naxis + 4, lambda v: (_is_int(v) and v == 1), 1, option, errs
)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ("IMAGE", "TABLE", "BINTABLE", "A3DTABLE")
# The check that xtension is not one of the standard types should be
# redundant.
return card.keyword == "XTENSION" and xtension not in standard_xtensions
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, "NonstandardExtHDU", len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, "ubyte", self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
|
b92e1b0c1ff82cccc9bd103e179225e77482c76ab165636859da60c983d215e9 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import os
import re
import shutil
import sys
import warnings
import numpy as np
from astropy.io.fits.file import FILE_MODES, _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import (
_free_space_check,
_get_array_mmap,
_is_int,
_tmp_name,
fileobj_closed,
fileobj_mode,
ignore_sigint,
isfile,
)
from astropy.io.fits.verify import VerifyError, VerifyWarning, _ErrList, _Verify
from astropy.utils import indent
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.exceptions import AstropyUserWarning
from . import compressed
from .base import ExtensionHDU, _BaseHDU, _NonstandardHDU, _ValidHDU
from .groups import GroupsHDU
from .image import ImageHDU, PrimaryHDU
if HAS_BZ2:
import bz2
__all__ = ["HDUList", "fitsopen"]
# FITS file signature as per RFC 4047
FITS_SIGNATURE = b"SIMPLE = T"
def fitsopen(
name,
mode="readonly",
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=None,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
        append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
        Whether to avoid reading all the HDUs and headers in a FITS file
        immediately upon opening.  This is an optimization especially useful
        for large files, as FITS has no way of determining the number and
        offsets of all the HDUs in a file without scanning through the file
        and reading all the headers.  Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name`` starts with the Amazon S3 storage prefix ``s3://`` or the
Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g., ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file.
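    Examples
    --------
    A minimal, illustrative sketch; ``"example.fits"`` is a placeholder for
    any local FITS file::
        >>> from astropy.io import fits
        >>> with fits.open("example.fits") as hdul:  # doctest: +SKIP
        ...     hdul.info()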
"""
from astropy.io.fits import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if "uint" not in kwargs:
kwargs["uint"] = conf.enable_uint
if not name:
raise ValueError(f"Empty filename: {name!r}")
return HDUList.fromfile(
name,
mode,
memmap,
save_backup,
cache,
lazy_load_hdus,
ignore_missing_simple,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
**kwargs,
)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : BaseHDU or sequence thereof, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file-like, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
# If we have read all the HDUs from the file or not
        # This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == "ostream"
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(f"Element {idx} in the HDUList input is not an HDU.")
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
# When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# Special case: if the FITS file is located on a remote file system
# and has not been fully read yet, we return a simplified repr to
# avoid downloading the entire file. We can tell that a file is remote
# from the fact that the ``fsspec`` package was used to open it.
is_fsspec_file = self._file and "fsspec" in str(
self._file._file.__class__.__bases__
)
if not self._read_all and is_fsspec_file:
return f"{type(self)} (partially read)"
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
else:
return HDUList(hdus)
# Originally this used recursion, but hypothetically an HDU with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(
super().__getitem__, self._positive_index_of(key)
)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def __contains__(self, item):
"""
        Returns `True` if ``item`` is an ``HDU`` *in* ``self`` or a valid
extension specification (e.g., integer extension number, extension
name, or a tuple of extension name and an extension version)
of a ``HDU`` in ``self``.
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except (KeyError, ValueError):
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError("An element in the HDUList must be an HDU.")
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError(f"{item} is not an HDU.")
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError(f"Extension {key} is out of bound or not found.")
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
if key == end_index or key == -1 and not self._resize:
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
output_verify = self._open_kwargs.get("output_verify", "exception")
self.close(output_verify=output_verify)
@classmethod
def fromfile(
cls,
fileobj,
mode=None,
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
**kwargs,
):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()``; it generally
        shouldn't be used directly.  Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(
fileobj=fileobj,
mode=mode,
memmap=memmap,
save_backup=save_backup,
cache=cache,
ignore_missing_simple=ignore_missing_simple,
lazy_load_hdus=lazy_load_hdus,
**kwargs,
)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer-like, etc.
A string or other memory buffer containing an entire FITS file.
Buffer-like objects include :class:`~bytes`, :class:`~bytearray`,
:class:`~memoryview`, and :class:`~numpy.ndarray`.
It should be noted that if that memory is read-only (such as a
Python string) the returned :class:`HDUList`'s data portions will
also be read-only.
**kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
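        Examples
        --------
        A minimal, illustrative sketch that round-trips a small in-memory
        FITS file through a bytes buffer (the array contents are arbitrary)::
            >>> import io
            >>> import numpy as np
            >>> from astropy.io import fits
            >>> buf = io.BytesIO()
            >>> fits.HDUList([fits.PrimaryHDU(np.arange(10))]).writeto(buf)
            >>> hdul = fits.HDUList.fromstring(buf.getvalue())
            >>> len(hdul)
            1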
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype="ubyte", buffer=data)
except TypeError:
raise TypeError(
f"The provided object {data} does not contain an underlying "
"memory buffer. fromstring() requires an object that "
"supports the buffer interface such as bytes, buffer, "
"memoryview, ndarray, etc. This restriction is to ensure "
"that efficient access to the array/table data is possible."
)
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
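        Examples
        --------
        A minimal, illustrative sketch (``hdul`` is assumed to have been
        returned by :func:`astropy.io.fits.open`; values depend on the file)::
            >>> hdul.fileinfo(0)['filemode']  # doctest: +SKIP
            'readonly'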
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info["file"]
fm = info["filemode"]
break
output = {
"file": f,
"filemode": fm,
"hdrLoc": None,
"datLoc": None,
"datSpan": None,
}
output["filename"] = self._file.name
output["resized"] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
"""Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
HDU to be removed and returned. If ``key`` is a tuple, it is
of the form ``(key, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : BaseHDU
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
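        Examples
        --------
        A minimal, illustrative sketch (``hdul`` is assumed to contain an
        extension named ``'SCI'``)::
            >>> sci_hdu = hdul.pop('SCI')  # remove and return the first 'SCI' HDU  # doctest: +SKIP
            >>> last_hdu = hdul.pop()      # remove and return the last HDU  # doctest: +SKIP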
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super().pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : BaseHDU
The HDU object to insert
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it."
)
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError("A GroupsHDU must be inserted as a Primary HDU.")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : BaseHDU
HDU to add to the `HDUList`.
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError("HDUList can only append an HDU.")
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError("Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str, tuple of (string, int) or BaseHDU
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
When ``key`` is an HDU object, this function returns the
index of that HDU object in the ``HDUList``.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
Raises
------
ValueError
If ``key`` is an HDU object and it is not found in the ``HDUList``.
KeyError
If an HDU specified by the ``key`` that is an extension number,
extension name, or a tuple of extension name and version is not
found in the ``HDUList``.
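        Examples
        --------
        A minimal, illustrative sketch (``hdul`` is assumed to contain an
        extension named ``'SCI'`` with ``EXTVER = 2``)::
            >>> hdul.index_of('PRIMARY')         # doctest: +SKIP
            0
            >>> idx = hdul.index_of(('SCI', 2))  # doctest: +SKIP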
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
elif isinstance(key, _BaseHDU):
return self.index(key)
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
"{} indices must be integers, extension names as strings, "
"or (extname, version) tuples; got {}"
"".format(self.__class__.__name__, _key)
)
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if (name == _key or (_key == "PRIMARY" and idx == 0)) and (
_ver is None or _ver == hdu.ver
):
found = idx
break
if found is None:
raise KeyError(f"Extension {key!r} not found.")
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(f"Extension {index} is out of bound or not found.")
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify="fix", verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print verbose messages
"""
if self._file.mode not in ("append", "update", "ostream"):
warnings.warn(
f"Flush for '{self._file.mode}' mode is not supported.",
AstropyUserWarning,
)
return
save_backup = self._open_kwargs.get("save_backup", False)
if save_backup and self._file.mode in ("append", "update"):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason,
                # then there's no point in trying to make a backup
backup = filename + ".bak"
idx = 1
while os.path.exists(backup):
backup = filename + ".bak." + str(idx)
idx += 1
warnings.warn(
f"Saving a backup of {filename} to {backup}.", AstropyUserWarning
)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError(
f"Failed to save backup to destination {filename}"
) from exc
self.verify(option=output_verify)
if self._file.mode in ("append", "ostream"):
for hdu in self:
if verbose:
try:
extver = str(hdu._header["extver"])
except KeyError:
extver = ""
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print("append HDU", hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == "update":
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if "EXTEND" in hdr:
if not hdr["EXTEND"] and get_first_ext() is not None:
hdr["EXTEND"] = True
elif get_first_ext() is not None:
if hdr["NAXIS"] == 0:
hdr.set("EXTEND", True, after="NAXIS")
else:
n = hdr["NAXIS"]
hdr.set("EXTEND", True, after="NAXIS" + str(n))
def writeto(
self, fileobj, output_verify="exception", overwrite=False, checksum=False
):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
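        Examples
        --------
        A minimal, illustrative sketch; ``"new.fits"`` is a placeholder
        output path::
            >>> hdul.writeto("new.fits", checksum=True, overwrite=True)  # doctest: +SKIP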
"""
if len(self) == 0:
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream"
# This can accept an open file object that's open to write only, or in
# append/update modes but only if the file doesn't exist.
fileobj = _File(fileobj, mode=mode, overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except (AttributeError, TypeError):
dirname = None
try:
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
finally:
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify="exception", verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (
self._file
and self._file.mode in ("append", "update")
and not self._file.closed
):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, "close"):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
Note that this function prints its results to the console---it
does not return a value.
Parameters
----------
output : file-like or bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
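        Examples
        --------
        A minimal, illustrative sketch (``hdul`` is assumed to be an open
        `HDUList`)::
            >>> hdul.info()                     # print the summary table  # doctest: +SKIP
            >>> rows = hdul.info(output=False)  # list of per-HDU summary tuples  # doctest: +SKIP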
"""
if output is None:
output = sys.stdout
if self._file is None:
name = "(No file associated with this HDUList)"
else:
name = self._file.name
results = [
f"Filename: {name}",
"No. Name Ver Type Cards Dimensions Format",
]
format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}"
default = ("", "", "", 0, (), "", "")
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary) :]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write("\n".join(results))
output.write("\n")
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : str
A string containing the file name associated with the HDUList
object if an association exists. Otherwise returns None.
"""
if self._file is not None:
if hasattr(self._file, "name"):
return self._file.name
return None
@classmethod
def _readfrom(
cls,
fileobj=None,
data=None,
mode=None,
memmap=None,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(
fileobj,
mode=mode,
memmap=memmap,
cache=cache,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = "readonly"
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
if (
not ignore_missing_simple
and hdulist._file
and hdulist._file.mode != "ostream"
and hdulist._file.size > 0
):
pos = hdulist._file.tell()
# FITS signature is supposed to be in the first 30 bytes, but to
# allow reading various invalid files we will check in the first
# card (80 bytes).
simple = hdulist._file.read(80)
match_sig = simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in (
b"T",
b"F",
)
if not match_sig:
# Check the SIMPLE card is there but not written correctly
match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple)
if match_sig_relaxed:
warnings.warn(
"Found a SIMPLE card but its format doesn't"
" respect the FITS Standard",
VerifyWarning,
)
else:
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError(
"No SIMPLE card found, this file does not appear to "
"be a valid FITS file. If this is really a FITS file, "
"try with ignore_missing_simple=True"
)
hdulist._file.seek(pos)
# Store additional keyword args that were passed to fits.open
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ("readonly", "denywrite"):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError("Empty or corrupt FITS file")
if not lazy_load_hdus or kwargs.get("checksum") is True:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if (
"disable_image_compression" in kwargs
and kwargs["disable_image_compression"]
):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size :]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if "checksum" in kwargs:
hdu._output_checksum = kwargs["checksum"]
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
"Error validating header for HDU #{} (note: Astropy "
"uses zero-based indexing).\n{}\n"
"There may be extra bytes after the last HDU or the "
"file is corrupted.".format(len(self), indent(str(exc))),
VerifyWarning,
)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option="warn"):
errs = _ErrList([], unit="HDU")
# the first (0th) element must be a primary HDU
if (
len(self) > 0
and (not isinstance(self[0], PrimaryHDU))
and (not isinstance(self[0], _NonstandardHDU))
):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = "Fixed by inserting one as 0th HDU."
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and (
"EXTEND" not in self[0].header or self[0].header["EXTEND"] is not True
):
err_text = (
"Primary HDU does not contain an EXTEND keyword "
"equal to T even though there are extension HDUs."
)
fix_text = "Fixed by inserting or updating the EXTEND keyword."
def fix(header=self[0].header):
naxis = header["NAXIS"]
if naxis == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(naxis)
header.set("EXTEND", value=True, after=after)
errs.append(
self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
)
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = f"HDUList's element {str(idx)} is not an extension HDU."
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
# Need to all _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == "gzip":
new_file = gzip.GzipFile(name, mode="ab+")
elif self._file.compression == "bzip2":
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
new_file = bz2.BZ2File(name, mode="w")
else:
new_file = name
with self.fromfile(new_file, mode="append") as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith("win"):
                    # Collect a list of open mmaps to the data; this will be
                    # used later. See below.
mmaps = [
(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self)
if hdu._has_data
]
hdulist._file.close()
self._file.close()
if sys.platform.startswith("win"):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode="rb+")
else:
old_file = old_name
ffo = _File(old_file, mode="update", memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith("win"):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
# https://github.com/numpy/numpy/issues/8628
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file like
# object. We can't write out to a file, we must update the file
# like object in place. To do this, we write out to a temporary
# file, then delete the contents in our file like object, then
# write the contents of the temporary file to the now empty file
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect of setting the objects _resize attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print("One or more header is resized.")
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print("One or more data area is resized.")
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
578f045cd5e0f95895b137e63666ae2c5288322facc04027bfdf583bbdbc68be | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent
Consistency is determined ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
Compare two record arrays
Does this field by field, using approximation testing for float columns
(Complex not yet handled.)
Column names not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f"fielda[{row}]: {fielda[row]}")
print(f"fieldb[{row}]: {fieldb[row]}")
print(f"field {i} differs in row {row}")
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
# Note that X format must be two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
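        # An '11X' column packs 11 bits per row into 2 bytes; on read it
        # compares elementwise against booleans (see the `exp` check after the
        # table is written out below).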
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
        # another way to create a table is by using an existing table's
        # information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
exp = [True, True, False, True, False, True, True, True, False, False, True]
temp = f2[1].data.field(7)
assert (temp[0] == exp).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
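        # (The np.rec.recarray.field(...) access above reads the stored bytes
        # directly, e.g. 84 == ord('T') for the logical column, rather than
        # the converted values that t[1].data['c4'] would return.)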
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
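        # 'start' is the 1-based character position of an ASCII-table field,
        # so start=19 and start=3 map to byte offsets 18 and 2 in the row
        # dtype asserted below; the column without a start is placed after.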
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
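        # The ASCII file stores (value - bzero) / bscale, so the second row,
        # 12.0, is written as (12.0 - 0.6) / 2.3, approximately 4.9565217,
        # which is what the D-exponent string below checks for.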
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
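        # ASCII integer formats Iw map to the narrowest integer dtype that can
        # hold w digits, as asserted below (I2 -> int16, I5 -> int32,
        # I10 -> int64).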
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
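        # (from_columns(..., nrows=<total>) followed by this per-field copy is
        # the usual recipe for appending the rows of one table to another.)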
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
columns_info = "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]"
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 30, "4R x 10C", columns_info, ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
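        # Assigning to the Column attribute should propagate into the
        # corresponding TNULLn header keyword when the HDU is written, as the
        # header checks below verify.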
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
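        # The writers should record a TDIMn keyword for the 2D string column
        # so that 'y' keeps its per-row shape of four 3-character strings on
        # round-trip (verified through the dtype comparisons below).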
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}"
assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}"
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
        # Assign the 4 rows from the second table to rows 5 through 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
        # Ensure I can change the value of one data element and it affects
        # all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
            # Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x)
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
# The ORBPARM column should not be in the data, though the data should
# be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
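# Each A10 value occupies 10 bytes on disk: the binary-table round trip
# below checks that unused trailing bytes are NUL-padded (matching `s`),
# while the ASCII TableHDU version pads them with spaces instead.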
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that reading works when the product is less than the repeat
count (treating the trailing bytes as unspecified fill values per the
FITS standard) and that column creation fails if the dimensions
specified by TDIMn imply more elements than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
assert h[1].data.itemsize == 48  # 24 16-bit values (20 + 4) = 48 bytes per row
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_colums(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
# Format X (bit array) is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now change TFORM1 to a float format and delete the TNULL1 keyword,
# making the '---' entries essentially unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unncessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
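# Expected row width: 8 bytes (i8) + 64 bytes (S64) + 3*2*4 bytes for the
# (3, 2) int32 array = 96 bytes; TDIM3 is '(2,3)' because FITS lists the
# axes in the reverse order of the numpy shape.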
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
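# With bscale=1 and bzero=32768 the physical values are written to disk as
# (value - 32768) in a signed 16-bit field, so in-memory modifications must
# be rescaled correctly when the table is saved.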
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), "More {0!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
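# The longest row in each of arr1 and arr2 has 255 elements, which is why
# the 'PJ(255)' formats are expected in the assertions further below.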
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
Check if the error is raised when the heap size is bigger than what can be
indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
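# 2**28 float64 elements is roughly 2 GiB of heap data, more than the
# 2**31 - 1 bytes addressable by the 32-bit descriptors of the 'P' format,
# so the write must fail and point the user at the 64-bit 'Q' format.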
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
Check that empty VLAs are correctly read.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
# We can't compare the whole array since the _VLF is an array of
# objects, hence we compare elementwise
for i in range(len(hdu[1].data["empty"])):
assert np.array_equal(
hdu[1].data["empty"][i], np.array([], dtype=np.int32)
)
def test_multidim_VLA_tables(self):
"""
Check that multidimensional VLAs are correctly written and read.
See https://github.com/astropy/astropy/issues/12860
and https://github.com/astropy/astropy/issues/7810
"""
a = np.arange(5)
b = np.arange(7)
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdus:
print(hdus[1].data["test"][0])
assert hdus[1].columns.formats == ["PD(7)"]
assert np.array_equal(
hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
)
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
)
a = np.arange(10).reshape((5, 2))
b = np.arange(14).reshape((7, 2))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(14)"]
assert np.array_equal(
hdus[1].data["test"][0],
np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]),
)
assert np.array_equal(
hdus[1].data["test"][1],
np.array(
[
[0.0, 1.0],
[2.0, 3.0],
[4.0, 5.0],
[6.0, 7.0],
[8.0, 9.0],
[10.0, 11.0],
[12.0, 13.0],
]
),
)
a = np.arange(3).reshape((1, 3))
b = np.arange(6).reshape((2, 3))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(6)"]
assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]]))
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized as
long as they are unambiguous ("unambiguous" is a stretch here, since
Numpy is case-insensitive when parsing format codes, but their "proper"
case is lower-case, so we accept that). In practice, any key in the
NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
c.format == fitsformat
c = fits.Column("TEST", recformat)
c.format == fitsformat
c = fits.Column("TEST", fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_seqence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
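# Hedged illustration (editor's addition, not part of the original test module): the
# unit_parse_strict behaviour exercised above comes down to how the TUNIT string is
# parsed under astropy's FITS unit format versus the generic format. A minimal sketch,
# relying only on the module-level imports; the helper is private so pytest will not
# collect it.
def _illustrate_fits_unit_parse_strict():
    generic = Unit("1 / (MeV sr s)")  # parses fine under the generic format
    # Under the FITS format the same string is rejected; parse_strict="silent" returns
    # an UnrecognizedUnit instead of raising, mirroring the Table.read calls above.
    fits_parsed = Unit("1 / (MeV sr s)", format="fits", parse_strict="silent")
    assert not isinstance(generic, UnrecognizedUnit)
    assert isinstance(fits_parsed, UnrecognizedUnit)
    return generic, fits_parsed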
|
0836844873c4d8f59157d9ea13f936be3b26e56956c67265e8fc637ebd4edbb1 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import io
import os
import subprocess
import sys
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.hdu.base import _NonstandardHDU, _ValidHDU
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.data import get_pkg_data_filenames
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdul:
hdul[4].name = "Jim"
hdul[4].ver = 9
assert hdul[("JIM", 9)].header["extname"] == "JIM"
def test_hdu_file_bytes(self):
with fits.open(self.data("checksum.fits")) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res["datSpan"] == kwargs.get("datSpan", 2880)
assert res["resized"] == kwargs.get("resized", False)
assert res["filename"] == self.data("checksum.fits")
assert res["datLoc"] == kwargs.get("datLoc", 8640)
assert res["hdrLoc"] == kwargs.get("hdrLoc", 0)
assert res["filemode"] == "readonly"
with fits.open(self.data("checksum.fits")) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(
VerifyError, hdul.writeto, self.temp("temp.fits"), output_verify="exception"
)
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data("tb.fits")) as hdul1:
hdul.append(hdul1[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data("arange.fits")) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data("tb.fits")) as hdul:
hdul.append(hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data("tb.fits")) as hdul1:
hdul.insert(0, hdul1[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data("arange.fits")) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data("tb.fits")) as hdul:
hdul.insert(1, hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [
(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters"),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
with pytest.raises(ValueError):
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
with fits.open(self.data("tb.fits")) as hdul:
hdul.insert(0, hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "ImageHDU", 12, (), "", ""),
(3, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data("tb.fits")) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", ""),
(1, "", 1, "ImageHDU", 12, (), "", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_filename(self, home_is_data):
"""Tests the HDUList filename method."""
with fits.open(self.data("tb.fits")) as hdul:
name = hdul.filename()
assert name == os.path.expanduser(self.data("tb.fits"))
def test_file_like(self):
"""
        Tests the use of a file-like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto().
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp("tmpfile.fits"), "wb")
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp("tmpfile.fits"), "wb")
hdul = fits.open(tmpfile, mode="ostream")
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp("tmpfile.fits"), "wb")
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data("test0.fits")) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header["EXTNAME"] == "SCI"
assert hdul[1].header["EXTVER"] == 1
assert hdul.index_of(("SCI", 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode="update")
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data("test0.fits")).st_mtime
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header["FOO"] = "BAR"
with pytest.warns(AstropyUserWarning, match="mode is not supported") as w:
hdul.flush()
assert len(w) == 1
assert oldmtime == os.stat(self.data("test0.fits")).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header["EXTEND"]
hdul.verify("silentfix")
assert "EXTEND" in hdul[0].header
assert hdul[0].header["EXTEND"] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data("arange.fits"))
# Malform NAXISj header data
hdu[0].header["NAXIS1"] = 11.0
hdu[0].header["NAXIS2"] = "10.0"
hdu[0].header["NAXIS3"] = "7"
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, "10.0", "7"]
# Perform verification including the fix
hdu.verify("silentfix")
# Check that malformed data was converted
assert hdu[0].header["NAXIS1"] == 11
assert hdu[0].header["NAXIS2"] == 10
assert hdu[0].header["NAXIS3"] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data("arange.fits"))
# Fake new NAXISj header data
hdu[0].header["NAXIS1"] = 768
hdu[0].header["NAXIS2"] = 64
hdu[0].header["NAXIS3"] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify("silentfix")
        # Check that the well-formed values were left unchanged
assert hdu[0].header["NAXIS1"] == 768
assert hdu[0].header["NAXIS2"] == 64
assert hdu[0].header["NAXIS3"] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array([10]))
hdul = fits.HDUList([hdu, sci])
assert "EXTEND" in hdu.header
assert hdu.header["EXTEND"] is True
hdul.writeto(self.temp("temp.fits"))
hdr = fits.getheader(self.temp("temp.fits"))
assert "EXTEND" in hdr
assert hdr["EXTEND"] is True
def test_replace_memmaped_array(self, home_is_temp):
# Copy the original before we modify it
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(self.temp("temp.fits"))
hdul = fits.open(self.temp("temp.fits"), mode="update", memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp("temp.fits"), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_bad_file_padding(self):
"""
Test warning when opening files with extra padding at the end.
See https://github.com/astropy/astropy/issues/4351
"""
# write some arbitrary data to a FITS file
fits.writeto(self.temp("temp.fits"), np.arange(100))
# append some arbitrary number of zeros to the end
with open(self.temp("temp.fits"), "ab") as fobj:
fobj.write(b"\x00" * 1234)
with pytest.warns(
AstropyUserWarning, match="Unexpected extra padding at the end of the file."
) as w:
with fits.open(self.temp("temp.fits")) as fobj:
fobj.info()
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Unexpected extra padding")
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data("test0.fits"), do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp("temp.fits"))
with open(self.temp("temp.fits"), "ab") as f:
f.seek(0, os.SEEK_END)
f.write(b"\0" * 2880)
assert info == fits.info(
self.temp("temp.fits"), output=False, do_not_scale_image_data=True
)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp("temp.fits"))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index("END" + " " * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp("temp.fits"), "r+b") as f:
f.seek(padding_start)
f.write(b"\0" * padding_len)
with pytest.warns(
AstropyUserWarning, match="contains null bytes instead of spaces"
) as w:
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == a).all()
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f"TEST{idx}"] = idx
idx += 1
hdu.writeto(self.temp("temp.fits"), checksum=True)
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header["TEST1"] = 2
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == data).all()
def test_update_resized_header(self, home_is_temp):
"""
Test saving updates to a file where the header is one block smaller
than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header[f"TEST{idx}"] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp("temp.fits"))
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp("temp.fits")) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f"TEST{idx}"] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp("temp.fits")) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self, home_is_temp):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp("temp.fits"))
with fits.open(self.temp("temp.fits"), mode="append") as hdul:
hdul.append(hdu)
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f"TEST{idx}"] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, "rb") as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields:
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif any(dim == 0 for dim in hdul[idx].data.shape) or any(
dim == 0 for dim in hdul2[idx].data.shape
):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data)
for filename in get_pkg_data_filenames("data", pattern="*.fits"):
if sys.platform == "win32" and filename.endswith("zerowidth.fits"):
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith(("variable_length_table.fits", "theap-gap.fits")):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ["a", "b", "c"])
@pytest.mark.filterwarnings("ignore:Saving a backup")
def test_save_backup(self, home_is_temp):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file("scale.fits")
with fits.open(
self.temp("scale.fits"), mode="update", save_backup=True
) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header["TEST"] = "TEST"
# This emits warning that needs to be ignored at the
# pytest.mark.filterwarnings level.
hdul[0].data[0] = 0
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak")))
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul1:
with fits.open(
self.temp("scale.fits.bak"), do_not_scale_image_data=True
) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with fits.open(
self.temp("scale.fits"), mode="update", save_backup=True
) as hdul:
# One more time to see if multiple backups are made
hdul[0].header["TEST2"] = "TEST"
hdul[0].data[0] = 1
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak")))
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak.1")))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp("test_a.fits"), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp("test_b.fits"), overwrite=True)
with fits.open(
self.temp("test_a.fits"), mode="update", memmap=mmap_a
) as hdul_a:
with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp("test_a.fits")) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name="a", format="J", array=arr_a)
col_b = fits.Column(name="b", format="J", array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp("test_a.fits"), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp("test_b.fits"), overwrite=True)
with fits.open(
self.temp("test_a.fits"), mode="update", memmap=mmap_a
) as hdul_a:
with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp("test_a.fits")) as hdul_a:
assert "b" in hdul_a[1].columns.names
assert "a" not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data["b"] == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist:
hdulist.append(fits.ImageHDU(name="a"))
assert "a" in hdulist
assert "A" in hdulist
assert ("a", 1) in hdulist
assert ("A", 1) in hdulist
assert "b" not in hdulist
assert ("a", 2) not in hdulist
assert ("b", 1) not in hdulist
assert ("b", 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite(self, home_is_temp):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp("test_overwrite.fits"))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=False)
hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=True)
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert key not in hdulist
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name="SCI"))
hdulist.append(fits.ImageHDU(name="SCI"))
hdulist.append(fits.ImageHDU(name="nada"))
hdulist.append(fits.ImageHDU(name="SCI"))
filename = self.temp("many_extension.fits")
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header["EXTNAME"] == "SCI"]
assert len(read_exts) == 2
f.close()
def test_read_non_standard_hdu(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
buf.seek(0)
hdustr = buf.read()
        hdustr = hdustr.replace(
            b"SIMPLE  =                    T", b"SIMPLE  =                    F"
        )
with open(filename, mode="wb") as f:
f.write(hdustr)
with fits.open(filename) as hdul:
assert isinstance(hdul[0], _NonstandardHDU)
assert hdul[0].header["FOO"] == "BAR"
def test_proper_error_raised_on_non_fits_file(self):
filename = self.temp("not-fits.fits")
with open(filename, mode="w", encoding="utf=8") as f:
f.write("Not a FITS file")
match = (
"No SIMPLE card found, this file does not appear to be a valid FITS file"
)
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode="append")
with pytest.raises(OSError, match=match):
fits.open(filename, mode="update")
def test_proper_error_raised_on_invalid_fits_file(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
# write 80 additional bytes so the block will have the correct size
buf.write(b" " * 80)
buf.seek(0)
buf.seek(80) # now remove the SIMPLE card
with open(filename, mode="wb") as f:
f.write(buf.read())
match = (
"No SIMPLE card found, this file does not appear to be a valid FITS file"
)
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode="append")
with pytest.raises(OSError, match=match):
fits.open(filename, mode="update")
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header["FOO"] == "BAR"
def test_warning_raised_on_non_standard_simple_card(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
# change the simple card format
buf.seek(0)
buf.write(b"SIMPLE = T ")
buf.seek(0)
with open(filename, mode="wb") as f:
f.write(buf.read())
match = "Found a SIMPLE card but its format doesn't respect the FITS Standard"
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode="append")
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode="update")
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header["FOO"] == "BAR"
# change the simple card format
buf.seek(0)
buf.write(b"SIMPLE = T / This is a FITS file")
buf.seek(0)
with open(filename, mode="wb") as f:
f.write(buf.read())
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
filename = self.temp("not-fits-with-unicode.fits")
with open(filename, mode="w", encoding="utf=8") as f:
f.write("Ce\xe7i ne marche pas")
# This should raise an OSError because there is no end card.
with pytest.raises(
OSError,
match=(
"No SIMPLE card found, this file "
"does not appear to be a valid FITS file"
),
):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
        # To avoid creating the file multiple times, the checks are
        # all included in this one test. See the discussion on the
        # PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp("not-fits.fits")
with open(filename, mode="w") as f:
f.write("# header line\n")
f.write("0.1 0.2\n")
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised.
# Make sure that files opened by the user are not closed
with open(filename, mode="rb") as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode="rb") as f:
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(filename, ignore_missing_end=True)
def test_pop_with_lazy_load(self):
filename = self.data("checksum.fits")
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(("SCI", 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop("SCI")
assert len(hdul) == 5
assert hdu_popped is hdu1
# Skip due to https://github.com/astropy/astropy/issues/8916
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="Cannot test on Windows"
)
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
to ensure that an HDUList can be written to a stream.
"""
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp("test.fits"), "wb") as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=fout) as p:
hdulist.writeto(p.stdin)
def test_output_verify(self):
hdul = fits.HDUList([fits.PrimaryHDU()])
hdul[0].header["FOOBAR"] = 42
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
data = f.read()
# create invalid card
data = data.replace(b"FOOBAR =", b"FOOBAR = ")
with open(self.temp("test2.fits"), "wb") as f:
f.write(data)
with pytest.raises(VerifyError):
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
hdul[0].header["MORE"] = "here"
with pytest.warns(VerifyWarning) as ww:
with fits.open(
self.temp("test2.fits"), mode="update", output_verify="fix+warn"
) as hdul:
hdul[0].header["MORE"] = "here"
assert len(ww) == 6
msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)"
assert msg in str(ww[3].message)
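# Hedged sketch (editor's addition, not part of the original module): the practical
# consequence of the shallow- versus deep-copy behaviour verified in test_shallow_copy
# and test_deep_copy above. A shallow copy shares the HDU objects, so header edits made
# through it are visible from the original HDUList, while a deepcopy is fully isolated.
# Relies only on the module-level imports; private so pytest will not collect it.
def _illustrate_hdulist_copy_semantics():
    hdul = fits.HDUList([fits.PrimaryHDU(np.arange(5.0))])
    shallow = hdul.copy()
    deep = copy.deepcopy(hdul)
    shallow[0].header["MARK"] = 1
    assert "MARK" in hdul[0].header  # the shallow copy shares the same HDU object
    assert "MARK" not in deep[0].header  # the deep copy holds independent HDUs
    return shallow, deep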
|
44a400ee4353644e4e57f3f31630eb3a49039e4be0545ef71059a25223a4bc58 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import io
import os
import pathlib
import warnings
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.io import fits
from astropy.io.fits import printdiff
from astropy.io.fits.connect import REMOVE_KEYWORDS
from astropy.io.fits.tests.test_table import _assert_attr_col
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from .conftest import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter("always", ResourceWarning)
_ = fits.getdata(self.data("test0.fits"))
_ = fits.getheader(self.data("test0.fits"))
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data("test0.fits"), "rb")
_ = fits.getdata(f)
assert not f.closed
f.seek(0)
_ = fits.getheader(f)
assert not f.closed
f.close() # Close it now
def test_table_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename, overwrite=True)
def test_masked_table_to_hdu(self):
i = np.ma.MaskedArray([1, 2, 3], mask=[True, False, False])
s = np.ma.MaskedArray(["a", "b", "c"], mask=[False, True, True])
c = np.ma.MaskedArray([2.3 + 1j, 4.5 + 0j, 6.7 - 1j], mask=[True, False, True])
f = np.ma.MaskedArray([2.3, 4.5, 6.7], mask=[True, False, True])
table = Table([i, s, c, f], names=["i", "s", "c", "f"])
# Check that FITS standard is used in replacing masked values.
hdu = fits.table_to_hdu(table)
assert isinstance(hdu, fits.BinTableHDU)
assert hdu.header["TNULL1"] == i.fill_value
assert_array_equal(hdu.data["i"], i.filled())
assert_array_equal(hdu.data["s"], s.filled(""))
assert_array_equal(hdu.data["c"], c.filled(np.nan))
assert_array_equal(hdu.data["c"].real, c.real.filled(np.nan))
assert_array_equal(hdu.data["c"].imag, c.imag.filled(np.nan))
assert_array_equal(hdu.data["c"], c.filled(complex(np.nan, np.nan)))
assert_array_equal(hdu.data["f"], f.filled(np.nan))
filename = self.temp("test_table_to_hdu.fits")
hdu.writeto(filename, overwrite=True)
def test_table_non_stringifyable_unit_to_hdu(self):
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = u.core.IrreducibleUnit("test")
with pytest.warns(
AstropyUserWarning, match="The unit 'test' could not be saved"
) as w:
fits.table_to_hdu(table)
assert len(w) == 1
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table.meta["comments"] = ["This", "is", "a", "comment"]
hdu = fits.table_to_hdu(table)
assert hdu.header.get("comment") == ["This", "is", "a", "comment"]
with pytest.raises(ValueError):
hdu.header.index("comments")
def test_table_to_hdu_filter_reserved(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9387
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
ins_cards = {
"EXPTIME": 32.1,
"XTENSION": "NEWTABLE",
"NAXIS": 1,
"NAXIS1": 3,
"NAXIS2": 9,
"PCOUNT": 42,
"OBSERVER": "Adams",
}
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(ins_cards)
with pytest.warns(
AstropyUserWarning, match=rf"Meta-data keyword \w+ will {diag}"
) as w:
hdu = fits.table_to_hdu(table)
# This relies on the warnings being raised in the order of the
# meta dict (note that the first and last card are legitimate keys)
assert len(w) == len(ins_cards) - 2
for i, key in enumerate(list(ins_cards)[1:-1]):
assert f"Meta-data keyword {key}" in str(w[i].message)
assert hdu.header.get("XTENSION") == "BINTABLE"
assert hdu.header.get("NAXIS") == 2
assert hdu.header.get("NAXIS1") == 13
assert hdu.header.get("NAXIS2") == 3
assert hdu.header.get("PCOUNT") == 0
np.testing.assert_almost_equal(hdu.header.get("EXPTIME"), 3.21e1)
@pytest.mark.parametrize("card", REMOVE_KEYWORDS)
def test_table_to_hdu_warn_reserved(self, card):
"""
Test warning for each keyword in ..connect.REMOVE_KEYWORDS, 1 by 1
"""
diag = "be ignored since it conflicts with a FITS reserved keyword"
res_cards = {
"XTENSION": "BINTABLE",
"BITPIX": 8,
"NAXIS": 2,
"NAXIS1": 12,
"NAXIS2": 3,
"PCOUNT": 0,
"GCOUNT": 1,
"TFIELDS": 2,
"THEAP": None,
}
ins_cards = {
"XTENSION": "TABLE",
"BITPIX": 16,
"NAXIS": 1,
"NAXIS1": 2,
"NAXIS2": 6,
"PCOUNT": 2,
"GCOUNT": 2,
"TFIELDS": 4,
"THEAP": 36,
}
table = Table(
[[1.0, 2.0, 3.0], [2.3, 4.5, 6.7]],
names=["wavelength", "flux"],
dtype=["f8", "f4"],
)
table.meta["ORIGIN"] = "Min.Silly Walks"
table.meta[card] = ins_cards[card]
assert table.meta.get(card) != res_cards[card]
with pytest.warns(
AstropyUserWarning, match=f"Meta-data keyword {card} will {diag}"
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get(card) == res_cards[card]
assert hdu.header.get("ORIGIN") == "Min.Silly Walks"
def test_table_to_hdu_filter_incompatible(self):
"""
Test removal of unsupported data types from header
"""
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i4", "U1", "f8"],
)
table.meta.update(
{
"OBSDATE": "2001-05-26",
"RAMP": np.arange(5),
"TARGETS": {"PRIMARY": 1, "SECONDAR": 3},
}
)
with pytest.warns(
AstropyUserWarning,
match=r"Attribute \S+ of type "
r".+ cannot be added to FITS Header - skipping",
):
hdu = fits.table_to_hdu(table)
assert hdu.header.get("OBSDATE") == "2001-05-26"
assert "RAMP" not in hdu.header
assert "TARGETS" not in hdu.header
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5,), dtype=[("x", float), ("y", int)])
h_in = fits.Header()
h_in["ANSWER"] = (42.0, "LTU&E")
filename = self.temp("tabhdr42.fits")
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out["ANSWER"] == 42
def test_image_extension_update_header(self, home_is_temp):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp("twoextension.fits")
hdus = [fits.PrimaryHDU(np.zeros((10, 10))), fits.ImageHDU(np.zeros((10, 10)))]
# Try to update a non-existent file
with pytest.raises(FileNotFoundError, match="No such file"):
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
fits.HDUList(hdus).writeto(filename)
fits.update(
filename, np.zeros((10, 10)), header=fits.Header([("WHAT", 100)]), ext=1
)
h_out = fits.getheader(filename, ext=1)
assert h_out["WHAT"] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data("arange.fits"), self.data("blank.fits")) is None
assert (
printdiff(self.data("arange.fits"), self.data("blank.fits"), ext=0) is None
)
assert (
printdiff(
self.data("o4sp040b0_raw.fits"),
self.data("o4sp040b0_raw.fits"),
extname="sci",
)
is None
)
        # This may seem weird, but see printdiff's error handling: we need to
        # test the case where the second file does not exist
with pytest.raises(OSError):
printdiff("o4sp040b0_raw.fits", "fakefile.fits", extname="sci")
# Test HDU object inputs
with fits.open(self.data("stddata.fits"), mode="readonly") as in1:
with fits.open(self.data("checksum.fits"), mode="readonly") as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
A simple test of the dump method.
Also regression test for https://github.com/astropy/astropy/issues/6937
"""
datastr = (
'" 1" "abc" " 3.70000007152557" " 0"\n'
'" 2" "xy " " 6.69999971389771" " 1"\n'
)
cdstr = (
'c1 1J I11 "" ""'
' -2147483647 "" "" \n'
'c2 3A A3 "" ""'
' "" "" "" \n'
'c3 1E G15.7 "" ""'
' "" 3 0.4 \n'
'c4 1L L6 "" ""'
' "" "" "" \n'
)
# copy fits file to the temp directory
self.copy_file("tb.fits")
# test without datafile
fits.tabledump(self.temp("tb.fits"))
assert os.path.isfile(self.temp("tb_1.txt"))
# test with datafile
fits.tabledump(self.temp("tb.fits"), datafile=self.temp("test_tb.txt"))
assert os.path.isfile(self.temp("test_tb.txt"))
# test with datafile and cdfile
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
fits.tabledump(self.temp("tb.fits"), datafile, cdfile)
assert os.path.isfile(datafile)
with open(datafile) as data:
assert data.read() == datastr
with open(cdfile) as coldefs:
assert coldefs.read() == cdstr
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
# copy fits file to the temp directory
self.copy_file(tablename)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
fits.tabledump(self.temp(tablename), datafile, cdfile, hfile)
new_tbhdu = fits.tableload(datafile, cdfile, hfile)
with fits.open(self.temp(tablename)) as hdul:
_assert_attr_col(new_tbhdu, hdul[1])
def test_append_filename(self, home_is_temp):
"""
Test fits.append with a filename argument.
"""
data = np.arange(6)
testfile = self.temp("test_append_1.fits")
# Test case 1: creation of file
fits.append(testfile, data=data, checksum=True)
# Test case 2: append to existing file, with verify=True
        # Also test that an additional keyword can be passed to fitsopen
fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
# Test case 3: append to existing file, with verify=False
fits.append(testfile, data=data * 3, checksum=True, verify=False)
with fits.open(testfile, checksum=True) as hdu1:
np.testing.assert_array_equal(hdu1[0].data, data)
np.testing.assert_array_equal(hdu1[1].data, data * 2)
np.testing.assert_array_equal(hdu1[2].data, data * 3)
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_append_filehandle(self, tmp_path, mode):
"""
Test fits.append with a file handle argument.
"""
append_file = tmp_path / "append.fits"
with append_file.open(mode) as handle:
fits.append(filename=handle, data=np.ones((4, 4)))
def test_append_with_header(self):
"""
Test fits.append with a fits Header, which triggers detection of the
HDU class. Regression test for
https://github.com/astropy/astropy/issues/8660
"""
testfile = self.temp("test_append_1.fits")
with fits.open(self.data("test0.fits")) as hdus:
for hdu in hdus:
fits.append(testfile, hdu.data, hdu.header, checksum=True)
with fits.open(testfile, checksum=True) as hdus:
assert len(hdus) == 5
def test_pathlib(self):
testfile = pathlib.Path(self.temp("test.fits"))
data = np.arange(10)
hdulist = fits.HDUList([fits.PrimaryHDU(data)])
hdulist.writeto(testfile)
with fits.open(testfile) as hdul:
np.testing.assert_array_equal(hdul[0].data, data)
def test_getdata_ext_given(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=2 * np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
for ext in [0, 1, 2]:
buf.seek(0)
data = fits.getdata(buf, ext=ext)
assert data[0, 0] == ext
def test_getdata_ext_given_nodata(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(IndexError, match="No data in HDU #2."):
fits.getdata(buf, ext=2)
def test_getdata_ext_not_given_with_data_in_primary(self):
prihdu = fits.PrimaryHDU(data=np.zeros((5, 5), dtype=int))
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 0
def test_getdata_ext_not_given_with_data_in_ext(self):
# tests fallback mechanism
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
exthdu2 = fits.ImageHDU(data=None)
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
data = fits.getdata(buf)
assert data[0, 0] == 1
def test_getdata_ext_not_given_nodata_any(self):
# tests exception raised when there is no data in either
# Primary HDU or first extension HDU
prihdu = fits.PrimaryHDU(data=None)
exthdu1 = fits.ImageHDU(data=None)
exthdu2 = fits.ImageHDU(data=np.ones((5, 5), dtype=int))
hdulist = fits.HDUList([prihdu, exthdu1, exthdu2])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in either Primary or first extension HDUs."
):
fits.getdata(buf)
def test_getdata_ext_not_given_nodata_noext(self):
# tests exception raised when there is no data in the
# Primary HDU and there are no extension HDUs
prihdu = fits.PrimaryHDU(data=None)
hdulist = fits.HDUList([prihdu])
buf = io.BytesIO()
hdulist.writeto(buf)
buf.seek(0)
with pytest.raises(
IndexError, match="No data in Primary HDU and no extension HDU found."
):
fits.getdata(buf)
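# Hedged sketch (editor's addition, not part of the original module): a round trip of
# the fits.table_to_hdu output back into a Table, complementing test_table_to_hdu above.
# `out_dir` stands in for any writable directory (for example the pytest tmp_path
# fixture); relies only on the module-level imports and is private so pytest will not
# collect it.
def _illustrate_table_to_hdu_roundtrip(out_dir):
    t = Table({"v": [1.0, 2.0, 3.0]})
    t["v"].unit = "m/s"
    hdu = fits.table_to_hdu(t)  # BinTableHDU with the column unit recorded in TUNIT1
    path = os.path.join(str(out_dir), "roundtrip.fits")
    hdu.writeto(path)
    back = Table.read(path)
    assert back["v"].unit == u.m / u.s  # the parseable unit survives the round trip
    return back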
|
681f9ffb8f6e0bc24e1182e7fe7c8ca0bc28873350b7ad8040bb17eef24ca5f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Can `astropy.io.fits.open` access (remote) data using the fsspec package?
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.utils.compat.optional_deps import HAS_FSSPEC, HAS_S3FS
from astropy.utils.data import get_pkg_data_filename
if HAS_FSSPEC:
import fsspec
@pytest.mark.skipif(not HAS_FSSPEC, reason="requires fsspec")
def test_fsspec_local():
"""Can we use fsspec to read a local file?"""
fn = get_pkg_data_filename("data/test0.fits")
with fits.open(fn) as hdulist_classic:
with fits.open(fn, use_fsspec=True) as hdulist_fsspec:
assert_array_equal(hdulist_classic[2].data, hdulist_fsspec[2].data)
assert_array_equal(
hdulist_classic[2].section[3:5], hdulist_fsspec[2].section[3:5]
)
@pytest.mark.skipif(not HAS_FSSPEC, reason="requires fsspec")
def test_fsspec_local_write(tmp_path):
"""Can we write to a local file that was opened using fsspec?"""
fn = get_pkg_data_filename("data/test0.fits")
fn_tmp = tmp_path / "tmp.fits"
with fits.open(fn, use_fsspec=True) as hdul:
# writing to a section is never allowed
with pytest.raises(TypeError):
hdul[1].section[0, 0] = -999
# however writing to .data should work
hdul[1].data[2, 3] = -999
assert hdul[1].data[2, 3] == -999
hdul.writeto(fn_tmp)
# Is the new value present when we re-open the file?
with fits.open(fn_tmp) as hdul:
assert hdul[1].data[2, 3] == -999
@pytest.mark.skipif(not HAS_FSSPEC, reason="requires fsspec")
def test_fsspec_cutout2d():
"""Does Cutout2D work with data loaded lazily using fsspec and .section?"""
fn = get_pkg_data_filename("data/test0.fits")
with fits.open(fn, use_fsspec=True) as hdul:
position = (10, 20)
size = (2, 3)
cutout1 = Cutout2D(hdul[1].data, position, size)
cutout2 = Cutout2D(hdul[1].section, position, size)
assert_allclose(cutout1.data, cutout2.data)
@pytest.mark.skipif(not HAS_FSSPEC, reason="requires fsspec")
def test_fsspec_compressed():
"""Does fsspec support compressed data correctly?"""
# comp.fits[1] is a compressed image with shape (440, 300)
fn = get_pkg_data_filename("data/comp.fits")
with fits.open(fn, use_fsspec=True) as hdul:
# The .data attribute should work as normal
assert hdul[1].data[0, 0] == 7
# And the .section attribute should work too
assert hdul[1].section[0, 0] == 7
@pytest.mark.remote_data
class TestFsspecRemote:
"""Test obtaining cutouts from FITS files via HTTP (from MAST) and S3 (from Amazon)."""
def setup_class(self):
# The test file (ibxl50020_jif.fits) is a Hubble jitter FITS file (*.jif)
# rather than a real image, because jitter files are less likely to
# change due to reprocessing.
self.http_url = "https://mast.stsci.edu/api/v0.1/Download/file/?uri=mast:HST/product/ibxl50020_jif.fits"
self.s3_uri = "s3://stpubdata/hst/public/ibxl/ibxl50020/ibxl50020_jif.fits"
# Random slice was selected for testing:
self.slice = (slice(31, 33), slice(27, 30))
# The expected cutout array below was obtained by downloading the URIs
        # listed above to a local path and executing:
# with fits.open(local_path) as hdul:
# expected_cutout = hdul[1].data[31:33, 27:30]
self.expected_cutout = np.array([[24, 88, 228], [35, 132, 305]], dtype=np.int32)
@pytest.mark.skipif(not HAS_FSSPEC, reason="requires fsspec")
def test_fsspec_http(self):
"""Can we use fsspec to open a remote FITS file via http?"""
with fits.open(self.http_url, use_fsspec=True) as hdul:
# Do we retrieve the expected array?
assert_array_equal(hdul[1].section[self.slice], self.expected_cutout)
# The file has multiple extensions which are not yet downloaded;
# the repr and string representation should reflect this.
assert "partially read" in repr(hdul)
assert "partially read" in str(hdul)
# Can the user also pass an fsspec file object directly to fits open?
with fsspec.open(self.http_url) as fileobj:
with fits.open(fileobj) as hdul2:
assert_array_equal(hdul2[1].section[self.slice], self.expected_cutout)
assert "partially read" in repr(hdul)
assert "partially read" in str(hdul)
@pytest.mark.skipif(not HAS_S3FS, reason="requires s3fs")
def test_fsspec_s3(self):
"""Can we use fsspec to open a FITS file in a public Amazon S3 bucket?"""
with fits.open(
self.s3_uri, fsspec_kwargs={"anon": True}
) as hdul: # s3:// paths should default to use_fsspec=True
# Do we retrieve the expected array?
assert_array_equal(hdul[1].section[self.slice], self.expected_cutout)
# The file has multiple extensions which are not yet downloaded;
# the repr and string representation should reflect this.
assert "partially read" in repr(hdul)
assert "partially read" in str(hdul)
# Can the user also pass an fsspec file object directly to fits open?
with fsspec.open(self.s3_uri, anon=True) as fileobj:
with fits.open(fileobj) as hdul2:
assert_array_equal(hdul2[1].section[self.slice], self.expected_cutout)
assert "partially read" in repr(hdul)
assert "partially read" in str(hdul)
|
be83d0dab0284503df77e3d4f2ab6813ee4e12b59dcf464a728d6898ddd1a954 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.column import Column
from astropy.io.fits.diff import (
FITSDiff,
HDUDiff,
HeaderDiff,
ImageDataDiff,
TableDataDiff,
)
from astropy.io.fits.hdu import HDUList, ImageHDU, PrimaryHDU
from astropy.io.fits.hdu.base import NonstandardExtHDU
from astropy.io.fits.hdu.table import BinTableHDU
from astropy.io.fits.header import Header
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
class DummyNonstandardExtHDU(NonstandardExtHDU):
def __init__(self, data=None, *args, **kwargs):
super().__init__(self, *args, **kwargs)
self._buffer = np.asarray(data).tobytes()
self._data_offset = 0
@property
def size(self):
return len(self._buffer)
class TestDiff(FitsTestCase):
def test_identical_headers(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha.tostring(), hb.tostring()).identical
with pytest.raises(TypeError):
HeaderDiff(1, 2)
def test_slightly_different_headers(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
assert not HeaderDiff(ha, hb).identical
def test_common_keywords(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
hb["D"] = (5, "Comment")
assert HeaderDiff(ha, hb).common_keywords == ["A", "B", "C"]
def test_different_keyword_count(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
del hb["B"]
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_count == (3, 2)
# But make sure the common keywords are at least correct
assert diff.common_keywords == ["A", "C"]
def test_different_keywords(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
hb["D"] = (5, "Comment")
ha["E"] = (6, "Comment")
ha["F"] = (7, "Comment")
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keywords == (["E", "F"], ["D"])
def test_different_keyword_values(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {"C": [(3, 4)]}
def test_different_keyword_comments(self):
ha = Header([("A", 1), ("B", 2), ("C", 3, "comment 1")])
hb = ha.copy()
hb.comments["C"] = "comment 2"
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_comments == {"C": [("comment 1", "comment 2")]}
def test_different_keyword_values_with_duplicate(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
ha.append(("C", 4))
hb.append(("C", 5))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {"C": [None, (4, 5)]}
def test_asymmetric_duplicate_keywords(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
ha.append(("A", 2, "comment 1"))
ha.append(("A", 3, "comment 2"))
hb.append(("B", 4, "comment 3"))
hb.append(("C", 5, "comment 4"))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {}
assert diff.diff_duplicate_keywords == {"A": (3, 1), "B": (1, 2), "C": (1, 2)}
report = diff.report()
assert (
"Inconsistent duplicates of keyword 'A' :\n"
" Occurs 3 time(s) in a, 1 times in (b)" in report
)
def test_floating_point_rtol(self):
ha = Header([("A", 1), ("B", 2.00001), ("C", 3.000001)])
hb = ha.copy()
hb["B"] = 2.00002
hb["C"] = 3.000002
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {
"B": [(2.00001, 2.00002)],
"C": [(3.000001, 3.000002)],
}
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {"B": [(2.00001, 2.00002)]}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert diff.identical
def test_floating_point_atol(self):
ha = Header([("A", 1), ("B", 1.0), ("C", 0.0)])
hb = ha.copy()
hb["B"] = 1.00001
hb["C"] = 0.000001
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {
"B": [(1.0, 1.00001)],
"C": [(0.0, 0.000001)],
}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert not diff.identical
assert diff.diff_keyword_values == {"C": [(0.0, 0.000001)]}
diff = HeaderDiff(ha, hb, atol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {"B": [(1.0, 1.00001)]}
diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality
assert not diff.identical
assert diff.diff_keyword_values == {"B": [(1.0, 1.00001)]}
diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, atol=1.1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6)
assert not diff.identical
def test_ignore_blanks(self):
with fits.conf.set_temp("strip_header_whitespace", False):
ha = Header([("A", 1), ("B", 2), ("C", "A ")])
hb = ha.copy()
hb["C"] = "A"
assert ha["C"] != hb["C"]
diff = HeaderDiff(ha, hb)
# Trailing blanks are ignored by default
assert diff.identical
assert diff.diff_keyword_values == {}
# Don't ignore blanks
diff = HeaderDiff(ha, hb, ignore_blanks=False)
assert not diff.identical
assert diff.diff_keyword_values == {"C": [("A ", "A")]}
@pytest.mark.parametrize("differ", [HeaderDiff, HDUDiff, FITSDiff])
def test_ignore_blank_cards(self, differ):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152
Ignore blank cards.
"""
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = Header([("A", 1), ("", ""), ("B", 2), ("", ""), ("C", 3)])
hc = ha.copy()
if differ is HeaderDiff:
hc.append()
hc.append()
else: # Ensure blanks are not at the end as they are stripped by HDUs
hc.add_blank(after=-2)
hc.add_blank(after=-2)
if differ in (HDUDiff, FITSDiff): # wrap it in a PrimaryHDU
ha, hb, hc = (PrimaryHDU(np.arange(10), h) for h in (ha, hb, hc))
hc_header = hc.header
if differ is FITSDiff: # wrap it in a HDUList
ha, hb, hc = (HDUList([h]) for h in (ha, hb, hc))
hc_header = hc[0].header
# We now have a header with interleaved blanks, and a header with end
# blanks, both of which should ignore the blanks
assert differ(ha, hb).identical
assert differ(ha, hc).identical
assert differ(hb, hc).identical
assert not differ(ha, hb, ignore_blank_cards=False).identical
assert not differ(ha, hc, ignore_blank_cards=False).identical
# Both hb and hc have the same number of blank cards; since order is
# currently ignored, these should still be identical even if blank
# cards are not ignored
assert differ(hb, hc, ignore_blank_cards=False).identical
if differ is HeaderDiff:
hc.append()
else: # Ensure blanks are not at the end as they are stripped by HDUs
hc_header.add_blank(after=-2)
# But now there are different numbers of blanks, so they should not be
# ignored:
assert not differ(hb, hc, ignore_blank_cards=False).identical
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy()
ha = Header([("A", 1), ("B", 2), ("C", 3)])
xa = np.array([(1.0, 1), (3.0, 4)], dtype=[("x", float), ("y", int)])
xb = np.array([(1.0, 2), (3.0, 5)], dtype=[("x", float), ("y", int)])
phdu = PrimaryHDU(header=ha)
ihdua = ImageHDU(data=a, name="SCI")
ihdub = ImageHDU(data=b, name="SCI")
bhdu1 = BinTableHDU(data=xa, name="ASDF")
bhdu2 = BinTableHDU(data=xb, name="ASDF")
hdula = HDUList([phdu, ihdua, bhdu1])
hdulb = HDUList([phdu, ihdub, bhdu2])
# ASDF extension should be different
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 2
# ASDF extension should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=["ASDF"])
assert diff.identical, diff.report()
diff = FITSDiff(hdula, hdulb, ignore_hdus=["ASD*"])
assert diff.identical, diff.report()
# SCI extension should be different
hdulb["SCI"].data += 1
diff = FITSDiff(hdula, hdulb, ignore_hdus=["ASDF"])
assert not diff.identical
# SCI and ASDF extensions should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=["SCI", "ASDF"])
assert diff.identical, diff.report()
# All EXTVER of SCI should be ignored
ihduc = ImageHDU(data=a, name="SCI", ver=2)
hdulb.append(ihduc)
diff = FITSDiff(hdula, hdulb, ignore_hdus=["SCI", "ASDF"])
assert not any(diff.diff_hdus), diff.report()
assert any(diff.diff_hdu_count), diff.report()
def test_ignore_keyword_values(self):
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["B"] = 4
hb["C"] = 5
diff = HeaderDiff(ha, hb, ignore_keywords=["*"])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_keywords=["B"])
assert not diff.identical
assert diff.diff_keyword_values == {"C": [(3, 5)]}
report = diff.report()
assert "Keyword B has different values" not in report
assert "Keyword C has different values" in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_keywords=["b"])
assert not diff.identical
assert diff.diff_keyword_values == {"C": [(3, 5)]}
def test_ignore_keyword_comments(self):
ha = Header([("A", 1, "A"), ("B", 2, "B"), ("C", 3, "C")])
hb = ha.copy()
hb.comments["B"] = "D"
hb.comments["C"] = "E"
diff = HeaderDiff(ha, hb, ignore_comments=["*"])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_comments=["B"])
assert not diff.identical
assert diff.diff_keyword_comments == {"C": [("C", "E")]}
report = diff.report()
assert "Keyword B has different comments" not in report
assert "Keyword C has different comments" in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_comments=["b"])
assert not diff.identical
assert diff.diff_keyword_comments == {"C": [("C", "E")]}
def test_trivial_identical_images(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
diff = ImageDataDiff(ia, ib)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_relative_tolerance(self):
ia = np.ones((10, 10)) - 0.00001
ib = np.ones((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_absolute_tolerance(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert not diff.identical
assert diff.diff_total == 100
diff = ImageDataDiff(ia, ib, atol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5)
assert diff.identical
assert diff.diff_total == 0
def test_not_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6)
assert not diff.identical
assert diff.diff_total == 100
def test_identical_comp_image_hdus(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189
        For this test we mostly just care that comparing two compressed images
        does not crash and returns the correct results. Two compressed images
        are considered identical if their decompressed data are the same;
        whether the same compression was used can be checked separately by
        looking at (or ignoring) header differences.
"""
data = np.arange(100.0).reshape(10, 10)
hdu = fits.CompImageHDU(data=data)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdula, fits.open(
self.temp("test.fits")
) as hdulb:
diff = FITSDiff(hdula, hdulb)
assert diff.identical
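    # Sketch, not an original test: two compressed HDUs built with different
    # compression settings decompress to the same array, so an ImageDataDiff of
    # their .data attributes is expected to report no differences even though
    # the underlying compressed headers would differ.
    def _example_compressed_data_diff(self):
        data = np.arange(100.0).reshape(10, 10)
        hdu_rice = fits.CompImageHDU(data=data, compression_type="RICE_1")
        hdu_gzip = fits.CompImageHDU(data=data, compression_type="GZIP_1")
        diff = ImageDataDiff(hdu_rice.data, hdu_gzip.data)
        assert diff.identical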
def test_different_dimensions(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100) - 1
# Although ib could be reshaped into the same dimensions, for now the
# data is not compared anyways
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ((10, 10), (100,))
assert diff.diff_total == 0
report = diff.report()
assert "Data dimensions differ" in report
assert "a: 10 x 10" in report
assert "b: 100" in report
assert "No further data comparison performed."
def test_different_pixels(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
ib[0, 0] = 10
ib[5, 5] = 20
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ()
assert diff.diff_total == 2
assert diff.diff_ratio == 0.02
assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))]
def test_identical_tables(self):
c1 = Column("A", format="L", array=[True, False])
c2 = Column("B", format="X", array=[[0], [1]])
c3 = Column("C", format="4I", dim="(2, 2)", array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column("D", format="J", bscale=2.0, array=[0, 1])
c5 = Column("E", format="A3", array=["abc", "def"])
c6 = Column("F", format="E", unit="m", array=[0.0, 1.0])
c7 = Column("G", format="D", bzero=-0.1, array=[0.0, 1.0])
c8 = Column("H", format="C", array=[0.0 + 1.0j, 2.0 + 3.0j])
c9 = Column("I", format="M", array=[4.0 + 5.0j, 6.0 + 7.0j])
c10 = Column("J", format="PI(2)", array=[[0, 1], [2, 3]])
c11 = Column("K", format="QJ(2)", array=[[0, 1], [2, 3]])
columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11]
ta = BinTableHDU.from_columns(columns)
tb = BinTableHDU.from_columns([c.copy() for c in columns])
diff = TableDataDiff(ta.data, tb.data)
assert diff.identical
assert len(diff.common_columns) == 11
assert diff.common_column_names == set("abcdefghijk")
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_diff_empty_tables(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178
Ensure that diffing tables containing empty data doesn't crash.
"""
c1 = Column("D", format="J")
c2 = Column("E", format="J")
thdu = BinTableHDU.from_columns([c1, c2], nrows=0)
hdula = fits.HDUList([thdu])
hdulb = fits.HDUList([thdu])
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_ignore_table_fields(self):
c1 = Column("A", format="L", array=[True, False])
c2 = Column("B", format="X", array=[[0], [1]])
c3 = Column("C", format="4I", dim="(2, 2)", array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column("B", format="X", array=[[1], [0]])
c5 = Column("C", format="4I", dim="(2, 2)", array=[[1, 2, 3, 4], [5, 6, 7, 8]])
ta = BinTableHDU.from_columns([c1, c2, c3])
tb = BinTableHDU.from_columns([c1, c4, c5])
diff = TableDataDiff(ta.data, tb.data, ignore_fields=["B", "C"])
assert diff.identical
# The only common column should be c1
assert len(diff.common_columns) == 1
assert diff.common_column_names == {"a"}
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_different_table_field_names(self):
ca = Column("A", format="L", array=[True, False])
cb = Column("B", format="L", array=[True, False])
cc = Column("C", format="L", array=[True, False])
ta = BinTableHDU.from_columns([ca, cb])
tb = BinTableHDU.from_columns([ca, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert len(diff.common_columns) == 1
assert diff.common_column_names == {"a"}
assert diff.diff_column_names == (["B"], ["C"])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert "Extra column B of format L in a" in report
assert "Extra column C of format L in b" in report
def test_different_table_field_counts(self):
"""
Test tables with some common columns, but different number of columns
overall.
"""
ca = Column("A", format="L", array=[True, False])
cb = Column("B", format="L", array=[True, False])
cc = Column("C", format="L", array=[True, False])
ta = BinTableHDU.from_columns([cb])
tb = BinTableHDU.from_columns([ca, cb, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == (1, 3)
assert len(diff.common_columns) == 1
assert diff.common_column_names == {"b"}
assert diff.diff_column_names == ([], ["A", "C"])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert " Tables have different number of columns:" in report
assert " a: 1\n b: 3" in report
def test_different_table_rows(self):
"""
Test tables that are otherwise identical but one has more rows than the
other.
"""
ca1 = Column("A", format="L", array=[True, False])
cb1 = Column("B", format="L", array=[True, False])
ca2 = Column("A", format="L", array=[True, False, True])
cb2 = Column("B", format="L", array=[True, False, True])
ta = BinTableHDU.from_columns([ca1, cb1])
tb = BinTableHDU.from_columns([ca2, cb2])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == ()
assert len(diff.common_columns) == 2
assert diff.diff_rows == (2, 3)
assert diff.diff_values == []
report = diff.report()
assert "Table rows differ" in report
assert "a: 2" in report
assert "b: 3" in report
assert "No further data comparison performed."
def test_different_table_data(self):
"""
Test diffing table data on columns of several different data formats
and dimensions.
"""
ca1 = Column("A", format="L", array=[True, False])
ca2 = Column("B", format="X", array=[[0], [1]])
ca3 = Column("C", format="4I", dim="(2, 2)", array=[[0, 1, 2, 3], [4, 5, 6, 7]])
ca4 = Column("D", format="J", bscale=2.0, array=[0.0, 2.0])
ca5 = Column("E", format="A3", array=["abc", "def"])
ca6 = Column("F", format="E", unit="m", array=[0.0, 1.0])
ca7 = Column("G", format="D", bzero=-0.1, array=[0.0, 1.0])
ca8 = Column("H", format="C", array=[0.0 + 1.0j, 2.0 + 3.0j])
ca9 = Column("I", format="M", array=[4.0 + 5.0j, 6.0 + 7.0j])
ca10 = Column("J", format="PI(2)", array=[[0, 1], [2, 3]])
ca11 = Column("K", format="QJ(2)", array=[[0, 1], [2, 3]])
cb1 = Column("A", format="L", array=[False, False])
cb2 = Column("B", format="X", array=[[0], [0]])
cb3 = Column("C", format="4I", dim="(2, 2)", array=[[0, 1, 2, 3], [5, 6, 7, 8]])
cb4 = Column("D", format="J", bscale=2.0, array=[2.0, 2.0])
cb5 = Column("E", format="A3", array=["abc", "ghi"])
cb6 = Column("F", format="E", unit="m", array=[1.0, 2.0])
cb7 = Column("G", format="D", bzero=-0.1, array=[2.0, 3.0])
cb8 = Column("H", format="C", array=[1.0 + 1.0j, 2.0 + 3.0j])
cb9 = Column("I", format="M", array=[5.0 + 5.0j, 6.0 + 7.0j])
cb10 = Column("J", format="PI(2)", array=[[1, 2], [3, 4]])
cb11 = Column("K", format="QJ(2)", array=[[1, 2], [3, 4]])
ta = BinTableHDU.from_columns(
[ca1, ca2, ca3, ca4, ca5, ca6, ca7, ca8, ca9, ca10, ca11]
)
tb = BinTableHDU.from_columns(
[cb1, cb2, cb3, cb4, cb5, cb6, cb7, cb8, cb9, cb10, cb11]
)
diff = TableDataDiff(ta.data, tb.data, numdiffs=20)
assert not diff.identical
# The column definitions are the same, but not the column values
assert diff.diff_columns == ()
assert diff.diff_values[0] == (("A", 0), (True, False))
assert diff.diff_values[1] == (("B", 1), ([1], [0]))
assert diff.diff_values[2][0] == ("C", 1)
assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all()
assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all()
assert diff.diff_values[3] == (("D", 0), (0, 2.0))
assert diff.diff_values[4] == (("E", 1), ("def", "ghi"))
assert diff.diff_values[5] == (("F", 0), (0.0, 1.0))
assert diff.diff_values[6] == (("F", 1), (1.0, 2.0))
assert diff.diff_values[7] == (("G", 0), (0.0, 2.0))
assert diff.diff_values[8] == (("G", 1), (1.0, 3.0))
assert diff.diff_values[9] == (("H", 0), (0.0 + 1.0j, 1.0 + 1.0j))
assert diff.diff_values[10] == (("I", 0), (4.0 + 5.0j, 5.0 + 5.0j))
assert diff.diff_values[11][0] == ("J", 0)
assert (diff.diff_values[11][1][0] == [0, 1]).all()
assert (diff.diff_values[11][1][1] == [1, 2]).all()
assert diff.diff_values[12][0] == ("J", 1)
assert (diff.diff_values[12][1][0] == [2, 3]).all()
assert (diff.diff_values[12][1][1] == [3, 4]).all()
assert diff.diff_values[13][0] == ("K", 0)
assert (diff.diff_values[13][1][0] == [0, 1]).all()
assert (diff.diff_values[13][1][1] == [1, 2]).all()
assert diff.diff_values[14][0] == ("K", 1)
assert (diff.diff_values[14][1][0] == [2, 3]).all()
assert (diff.diff_values[14][1][1] == [3, 4]).all()
assert diff.diff_total == 15
assert np.isclose(diff.diff_ratio, 0.682, atol=1e-3, rtol=0)
report = diff.report()
assert "Column A data differs in row 0:\n a> True\n b> False" in report
assert "...and at 1 more indices.\n Column D data differs in row 0:" in report
assert "15 different table data element(s) found (68.18% different)" in report
assert report.count("more indices") == 1
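    # Sketch based on the behaviour exercised above: numdiffs is understood to
    # cap how many individual value differences are collected for reporting,
    # while diff_total still counts every differing element.  Treat the exact
    # cap semantics as an assumption, not a documented guarantee.
    def _example_numdiffs_cap(self):
        ca = Column("A", format="J", array=np.arange(10))
        cb = Column("A", format="J", array=np.arange(10) + 1)
        ta = BinTableHDU.from_columns([ca])
        tb = BinTableHDU.from_columns([cb])
        diff = TableDataDiff(ta.data, tb.data, numdiffs=3)
        assert diff.diff_total == 10
        assert len(diff.diff_values) <= diff.diff_total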
def test_identical_files_basic(self):
"""Test identicality of two simple, extensionless files."""
a = np.arange(100).reshape(10, 10)
hdu = PrimaryHDU(data=a)
hdu.writeto(self.temp("testa.fits"))
hdu.writeto(self.temp("testb.fits"))
diff = FITSDiff(self.temp("testa.fits"), self.temp("testb.fits"))
assert diff.identical
report = diff.report()
# Primary HDUs should contain no differences
assert "Primary HDU" not in report
assert "Extension HDU" not in report
assert "No differences found." in report
a = np.arange(10)
ehdu = ImageHDU(data=a)
diff = HDUDiff(ehdu, ehdu)
assert diff.identical
report = diff.report()
assert "No differences found." in report
def test_partially_identical_files1(self):
"""
Test files that have some identical HDUs but a different extension
count.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
hdula = HDUList([phdu, ehdu])
hdulb = HDUList([phdu, ehdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == (2, 3)
# diff_hdus should be empty, since the third extension in hdulb
# has nothing to compare against
assert diff.diff_hdus == []
report = diff.report()
assert "Files contain different numbers of HDUs" in report
assert "a: 2\n b: 3" in report
assert "No differences found between common HDUs" in report
def test_partially_identical_files2(self):
"""
Test files that have some identical HDUs but one different HDU.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
ehdu2 = ImageHDU(data=(a + 1))
hdula = HDUList([phdu, ehdu, ehdu])
hdulb = HDUList([phdu, ehdu2, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == ()
assert len(diff.diff_hdus) == 1
assert diff.diff_hdus[0][0] == 1
hdudiff = diff.diff_hdus[0][1]
assert not hdudiff.identical
assert hdudiff.diff_extnames == ()
assert hdudiff.diff_extvers == ()
assert hdudiff.diff_extension_types == ()
assert hdudiff.diff_headers.identical
assert hdudiff.diff_data is not None
datadiff = hdudiff.diff_data
assert isinstance(datadiff, ImageDataDiff)
assert not datadiff.identical
assert datadiff.diff_dimensions == ()
assert datadiff.diff_pixels == [((0, y), (y, y + 1)) for y in range(10)]
assert datadiff.diff_ratio == 1.0
assert datadiff.diff_total == 100
report = diff.report()
# Primary HDU and 2nd extension HDU should have no differences
assert "Primary HDU" not in report
assert "Extension HDU 2" not in report
assert "Extension HDU 1" in report
assert "Headers contain differences" not in report
assert "Data contains differences" in report
for y in range(10):
assert f"Data differs at [{y + 1}, 1]" in report
assert "100 different pixels found (100.00% different)." in report
def test_partially_identical_files3(self):
"""
Test files that have some identical HDUs but a different extension
name.
"""
phdu = PrimaryHDU()
ehdu = ImageHDU(name="FOO")
hdula = HDUList([phdu, ehdu])
ehdu = BinTableHDU(name="BAR")
ehdu.header["EXTVER"] = 2
ehdu.header["EXTLEVEL"] = 3
hdulb = HDUList([phdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 1
hdu_diff = diff.diff_hdus[0][1]
assert hdu_diff.diff_extension_types == ("IMAGE", "BINTABLE")
assert hdu_diff.diff_extnames == ("FOO", "BAR")
assert hdu_diff.diff_extvers == (1, 2)
assert hdu_diff.diff_extlevels == (1, 3)
report = diff.report()
assert "Extension types differ" in report
assert "a: IMAGE\n b: BINTABLE" in report
assert "Extension names differ" in report
assert "a: FOO\n b: BAR" in report
assert "Extension versions differ" in report
assert "a: 1\n b: 2" in report
assert "Extension levels differ" in report
assert "a: 1\n b: 2" in report
def test_diff_nans(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204
"""
# First test some arrays that should be equivalent....
arr = np.empty((10, 10), dtype=np.float64)
arr[:5] = 1.0
arr[5:] = np.nan
arr2 = arr.copy()
table = np.rec.array(
[(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)], names=["cola", "colb"]
).view(fits.FITS_rec)
table2 = table.copy()
assert ImageDataDiff(arr, arr2).identical
assert TableDataDiff(table, table2).identical
# Now let's introduce some differences, where there are nans and where
# there are not nans
arr2[0][0] = 2.0
arr2[5][0] = 2.0
table2[0][0] = 2.0
table2[1][1] = 2.0
diff = ImageDataDiff(arr, arr2)
assert not diff.identical
assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0))
assert diff.diff_pixels[1][0] == (5, 0)
assert np.isnan(diff.diff_pixels[1][1][0])
assert diff.diff_pixels[1][1][1] == 2.0
diff = TableDataDiff(table, table2)
assert not diff.identical
assert diff.diff_values[0] == (("cola", 0), (1.0, 2.0))
assert diff.diff_values[1][0] == ("colb", 1)
assert np.isnan(diff.diff_values[1][1][0])
assert diff.diff_values[1][1][1] == 2.0
def test_file_output_from_path_string(self):
outpath = self.temp("diff_output.txt")
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
with open(outpath) as fout:
assert fout.read() == report_as_string
def test_file_output_overwrite_safety(self):
outpath = self.temp("diff_output.txt")
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
diffobj.report(fileobj=outpath)
def test_file_output_overwrite_success(self):
outpath = self.temp("diff_output.txt")
ha = Header([("A", 1), ("B", 2), ("C", 3)])
hb = ha.copy()
hb["C"] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
diffobj.report(fileobj=outpath, overwrite=True)
with open(outpath) as fout:
assert (
fout.read() == report_as_string
), "overwritten output file is not identical to report string"
def test_rawdatadiff_nodiff(self):
a = np.arange(100, dtype="uint8").reshape(10, 10)
b = a.copy()
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert diff.identical
report = diff.report()
assert "No differences found." in report
def test_rawdatadiff_dimsdiff(self):
a = np.arange(100, dtype="uint8") + 10
b = a[:80].copy()
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert not diff.identical
report = diff.report()
assert "Data sizes differ:" in report
assert "a: 100 bytes" in report
assert "b: 80 bytes" in report
assert "No further data comparison performed." in report
def test_rawdatadiff_bytesdiff(self):
a = np.arange(100, dtype="uint8") + 10
b = a.copy()
changes = [(30, 200), (89, 170)]
for i, v in changes:
b[i] = v
hdu_a = DummyNonstandardExtHDU(data=a)
hdu_b = DummyNonstandardExtHDU(data=b)
diff = HDUDiff(hdu_a, hdu_b)
assert not diff.identical
diff_bytes = diff.diff_data.diff_bytes
assert len(changes) == len(diff_bytes)
for j, (i, v) in enumerate(changes):
assert diff_bytes[j] == (i, (i + 10, v))
report = diff.report()
assert "Data contains differences:" in report
for i, _ in changes:
assert f"Data differs at byte {i}:" in report
assert "2 different bytes found (2.00% different)." in report
def test_fitsdiff_hdu_name(tmp_path):
"""Make sure diff report reports HDU name and ver if same in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI")])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1 (SCI, 1):" in diff.report()
def test_fitsdiff_no_hdu_name(tmp_path):
"""Make sure diff report doesn't report HDU name if not in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report()
def test_fitsdiff_with_names(tmp_path):
"""Make sure diff report doesn't report HDU name if not same in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI", ver=1)])
hdulist.writeto(path1)
hdulist[1].name = "ERR"
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report()
def test_rawdatadiff_diff_with_rtol(tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/13330"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
a = np.zeros((10, 2), dtype="float32")
a[:, 0] = np.arange(10, dtype="float32") + 10
a[:, 1] = np.arange(10, dtype="float32") + 20
b = a.copy()
changes = [(3, 13.1, 23.1), (8, 20.5, 30.5)]
for i, v, w in changes:
b[i, 0] = v
b[i, 1] = w
ca = Column("A", format="20E", array=[a])
cb = Column("A", format="20E", array=[b])
hdu_a = BinTableHDU.from_columns([ca])
hdu_a.writeto(path1, overwrite=True)
hdu_b = BinTableHDU.from_columns([cb])
hdu_b.writeto(path2, overwrite=True)
with fits.open(path1) as fits1:
with fits.open(path2) as fits2:
diff = FITSDiff(fits1, fits2, atol=0, rtol=0.001)
str1 = diff.report(fileobj=None, indent=0)
diff = FITSDiff(fits1, fits2, atol=0, rtol=0.01)
str2 = diff.report(fileobj=None, indent=0)
assert "...and at 1 more indices." in str1
assert "...and at 1 more indices." not in str2
|
189ad884977eb5335266093451405ac47dc983650cdad07f4eea716dcbf6624d | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import warnings
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([("a", 1), ("b", 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header["c"] = 100
assert "c" not in copied_header
# and changing the copy should not change the original.
copied_header["a"] = 0
assert original_header["a"] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([("a", 10)])
new_header = fits.Header(original_header, copy=True)
original_header["a"] = 20
assert new_header["a"] == 10
new_header["a"] = 0
assert original_header["a"] == 20
def test_init_with_dict():
dict1 = {"a": 11, "b": 12, "c": 13, "d": 14, "e": 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
    # Create a list of tuples, each consisting of a letter and its index number
list1 = [(i, j) for j, i in enumerate("abcdefghijklmnopqrstuvwxyz")]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
    # Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
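# Sketch, illustrative only: a Header can equally be built from a list of
# (keyword, value, comment) tuples, and copy=True detaches the new Header from
# its source, mirroring the behaviour exercised by the tests above.
def _example_header_construction():
    from astropy.io import fits

    source = fits.Header([("OBSERVER", "EDWIN", "observer name"), ("EXPTIME", 30.0)])
    detached = fits.Header(source, copy=True)
    source["EXPTIME"] = 60.0
    return detached["EXPTIME"]  # still 30.0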
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
header.rename_keyword("A", "B")
assert "A" not in header
assert "B" in header
assert header[0] == "B"
assert header["B"] == "B"
assert header.comments["B"] == "C"
@pytest.mark.parametrize("key", ["A", "a"])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
assert key in header
assert header[key] == "B"
assert header.get(key) == "B"
assert header.index(key) == 0
assert header.comments[key] == "C"
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert c.keyword == ""
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == "ABC"
assert c.value == "abc"
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card("abc", "<8 ch")
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card("nullstr", "")
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring("ABC = F")
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card("long_int", -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card("floatnum", -467374636747637647347374734737437.0)
if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad(
"FLOATNUM= -4.6737463674763E+032"
):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_floating_point_string_representation_card(self):
"""
Ensures Card formats float values with the correct precision, avoiding
comment truncation
Regression test for https://github.com/astropy/astropy/issues/14507
"""
k = "HIERARCH ABC DEF GH IJKLMN"
com = "[m] abcdef ghijklm nopqrstu vw xyzab"
c = fits.Card(k, 0.009125, com)
expected_str = f"{k} = 0.009125 / {com}"
assert str(c)[: len(expected_str)] == expected_str
c = fits.Card(k, 8.95, com)
expected_str = f"{k} = 8.95 / {com}"
assert str(c)[: len(expected_str)] == expected_str
c = fits.Card(k, -99.9, com)
expected_str = f"{k} = -99.9 / {com}"
assert str(c)[: len(expected_str)] == expected_str
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card("abc", 9, "abcde" * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (
str(c) == "ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
)
c = fits.Card("abc", "a" * 68, "abcdefg")
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ("abc",), {"value": (2, 3)})
pytest.raises(ValueError, fits.Card, "key", [], "comment")
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, "abcdefghi", "long")
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card("abc+", 9)
assert len(w) == 1
assert c.image == _pad("HIERARCH abc+ = 9")
def test_add_history(self):
header = fits.Header(
[
("A", "B", "C"),
("HISTORY", 1),
("HISTORY", 2),
("HISTORY", 3),
("", "", ""),
("", "", ""),
]
)
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header["HISTORY"] == [1, 2, 3, 4]
assert repr(header["HISTORY"]) == "1\n2\n3\n4"
header.add_history(0, after="A")
assert len(header) == 6
assert header.cards[1].value == 0
assert header["HISTORY"] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header(
[("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")]
)
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[""] == [1, 2, 3, "", "", 4]
assert repr(header[""]) == "1\n2\n3\n\n\n4"
header.add_blank(0, after="A")
assert len(header) == 8
assert header.cards[1].value == 0
assert header[""] == [0, 1, 2, 3, "", "", 4]
header[""] = 5
header[" "] = 6
assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6]
assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
header = fits.Header()
header.update({"FOO": ("BAR", "BAZ")})
header.update(FakeHeader([("A", 1), ("B", 2, "comment")]))
assert set(header.keys()) == {"FOO", "A", "B"}
assert header.comments["B"] == "comment"
# test that comments are preserved
tmphdr = fits.Header()
tmphdr["HELLO"] = (1, "this is a comment")
header.update(tmphdr)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO"}
assert header.comments["HELLO"] == "this is a comment"
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"}
assert set(header.values()) == {"BAR", 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data("arange.fits"))
hdul[0].header.update({"FOO": ("BAR", "BAZ")})
assert hdul[0].header["FOO"] == "BAR"
assert hdul[0].header.comments["FOO"] == "BAZ"
with pytest.raises(ValueError):
hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")})
hdul.writeto(self.temp("test.fits"))
hdul.close()
hdul = fits.open(self.temp("test.fits"), mode="update")
hdul[0].header.comments["FOO"] = "QUX"
hdul.close()
hdul = fits.open(self.temp("test.fits"))
assert hdul[0].header.comments["FOO"] == "QUX"
hdul[0].header.add_comment(0, after="FOO")
assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0"
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad("HISTORY " + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad("COMMENT " + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value."
)
assert (
c.value == "card has no comments. "
"/ text after slash is still part of the value."
)
assert c.comment == ""
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card("", " / EXPOSURE INFORMATION")
assert str(c) == _pad(" / EXPOSURE INFORMATION")
c = fits.Card.fromstring(str(c))
assert c.keyword == ""
assert c.value == " / EXPOSURE INFORMATION"
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring("ABC = (8, 9)")
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring("abc = + 2.1 e + 12")
assert c.value == 2100000000000.0
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
        # fixable non-FSC: if the card is not parsable, its value will be
        # assumed to be a string and everything after the first slash will be
        # the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes / let's also try the comment"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment "
)
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring("ABC = ")
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header["FOO"] = "BAR"
header["UNDEF"] = None
assert list(header.values()) == ["BAR", None]
assert list(header.items()) == [("FOO", "BAR"), ("UNDEF", None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring("XYZ= 100")
assert c.keyword == "XYZ"
assert c.value == 100
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)"
err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'"
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify("fix")
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
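    # Sketch: assigning a value longer than the 68 characters a single card can
    # hold makes the Header spill it across CONTINUE cards automatically, and
    # indexing the keyword returns the full string (keyword name illustrative).
    def _example_continue_card_roundtrip(self):
        header = fits.Header()
        value = "x" * 150
        header["LONGVAL"] = value
        assert "CONTINUE" in str(header.cards["LONGVAL"])
        assert header["LONGVAL"] == value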
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card(
"WHATEVER",
"SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_"
"03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY"
"_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml",
)
assert (
str(c)
== "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' "
)
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1["TEST"] = "abcdefg" * 30
h2 = fits.Header()
h2["TEST"] = "abcdefg" * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header["TEST1"] = ("Regular value", "Regular comment")
header["TEST2"] = ("long string value " * 10, "long comment " * 10)
header["TEST3"] = ("Regular value", "Regular comment")
assert repr(header).splitlines() == [
str(fits.Card("TEST1", "Regular value", "Regular comment")),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card("TEST3", "Regular value", "Regular comment")),
]
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = "long string value " * 10
header = fits.Header()
header[""] = value
assert len(header) == 3
assert " ".join(header[""]) == value.rstrip()
# Ensure that this works like other commentary keywords
header["COMMENT"] = value
header["HISTORY"] = value
assert header["COMMENT"] == header["HISTORY"]
assert header["COMMENT"] == header[""]
def test_long_string_from_file(self):
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
c = hdul[0].header.cards["abc"]
hdul.close()
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10)
assert (
str(c)
== "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment "
)
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' / comments in line 1")
+ _pad(
"continue 'continue with long string but without the "
"ampersand at the end' /"
)
+ _pad(
"continue 'continue must have string value (with quotes)' "
"/ comments with ''. "
)
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c)
== "ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. "
)
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad(
"EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'"
)
+ _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'")
+ _pad("CONTINUE '&' / pysyn expression")
)
assert c.keyword == "EXPR"
assert (
c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits "
"* 5.87359e-12 * MWAvg(Av=0.12)"
)
assert c.comment == "pysyn expression"
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h["SVALUE"] = "A" * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card("TEST", "long value" * 10, "long comment &" * 10)
assert (
str(c)
== "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & "
)
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(
AstropyUserWarning, match="HIERARCH card will be created"
) as w:
c = fits.Card(
"ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert len(w) == 1
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
# Test manual creation of hierarch card
c = fits.Card("hierarch abcdefghi", 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card(
"HIERARCH ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
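    # Sketch: HIERARCH keywords can also be set directly on a Header; lookups
    # are case-insensitive and may omit the HIERARCH prefix (the keyword used
    # here is illustrative).
    def _example_hierarch_header_assignment(self):
        header = fits.Header()
        header["HIERARCH ESO INS GRAT NAME"] = "example"
        assert header["ESO INS GRAT NAME"] == "example"
        assert "eso ins grat name" in header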
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings."""
filename = fits.util.get_testdata_filepath("compressed_image.fits")
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
# Test also with creation via the Card constructor
c = fits.Card("HIERARCH key.META_4", "calFileVersion")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment")
# This should not raise any exceptions
c.verify("exception")
assert c.keyword == "WeirdCard.~!@#_^$%&"
assert c.value == "The value"
assert c.comment == "a comment"
# Test also the specific case from the original bug report
header = fits.Header(
[
("simple", True),
("BITPIX", 8),
("NAXIS", 0),
("EXTEND", True, "May contain datasets"),
("HIERARCH key.META_0", "detRow"),
]
)
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
header2 = hdul[0].header
assert str(header.cards[header.index("key.META_0")]) == str(
header2.cards[header2.index("key.META_0")]
)
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], "NAXIS")
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header["NAXIS"]
def test_hierarch_card_lookup(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
assert "abcdefghi" in header
assert header["abcdefghi"] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert "ABCDEFGHI" in header
def test_hierarch_card_delete(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
del header["hierarch abcdefghi"]
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["abcdefghi"] = 10
header["abcdefgh"] = 10
header["abcdefg"] = 10
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header["abcdefghij"]
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header[2]
assert list(header.keys())[2] == "abcdefg".upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLAH BLAH": "TESTA"})
assert len(w) == 0
assert "BLAH BLAH" in header
assert header["BLAH BLAH"] == "TESTA"
header.update({"HIERARCH BLAH BLAH": "TESTB"})
assert len(w) == 0
assert header["BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH": "TESTC"})
assert len(w) == 1
assert len(header) == 1
assert header["BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["blah blah"], "TESTD"
header.update({"blah blah": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["blah blah"], "TESTE"
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({"BLAH BLAH BLAH": "TESTA"})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"})
assert len(w) == 3
assert header["BLAH BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH BLAH": "TESTC"})
assert len(w) == 4
assert header["BLAH BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah blah": "TESTD"})
assert len(w) == 4
assert header["blah blah blah"], "TESTD"
header.update({"blah blah blah": "TESTE"})
assert len(w) == 5
assert header["blah blah blah"], "TESTE"
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLA BLA": "TESTA"})
assert len(w) == 0
assert "BLA BLA" in header
assert header["BLA BLA"] == "TESTA"
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 0
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 1
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTE"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({"BLA BLA": "TESTA"})
assert len(w) == 1
assert msg in str(w[0].message)
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 1
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 2
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 3
assert len(header) == 1
assert header["bla bla"], "TESTE"
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header["FOO"] = ("bar", "baz", "qux")
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header["FOO"] = ("BAR",)
header["FOO2"] = (None,)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == ""
assert header.comments["FOO"] == ""
def test_header_setitem_2tuple(self):
header = fits.Header()
header["FOO"] = ("BAR", "BAZ")
header["FOO2"] = (None, None)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == "BAZ"
assert header.comments["FOO"] == "BAZ"
assert header.comments["FOO2"] == ""
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header["FOO"] = "BAR"
assert header["FOO"] == "BAR"
header["FOO"] = None
assert header["FOO"] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep="\n")
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header["UNDEF3"] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header["DEFINED"] == 42
assert header["UNDEF"] is None
assert header["UNDEF2"] is None
assert header["UNDEF3"] is None
assert header["UNDEF5"] is None
assert header["UNDEF6"] is None
# Assign an undefined value to a new card
header["UNDEF4"] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All cards in the header should now have undefined values
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([("A", "B", "C")])
header.set("A", comment="D")
assert header["A"] == "B"
assert header.comments["A"] == "D"
def test_header_iter(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header) == ["A", "C"]
def test_header_slice(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
newheader = header[1:]
assert len(newheader) == 2
assert "A" not in newheader
assert "C" in newheader
assert "E" in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == "F"
assert newheader[1] == "D"
assert newheader[2] == "B"
newheader = header[::2]
assert len(newheader) == 2
assert "A" in newheader
assert "C" not in newheader
assert "E" in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = "GH"
assert header[1] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header[1:] = ["H", "I"]
assert header[1] == "H"
assert header[2] == "I"
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
del header[1:]
assert len(header) == 1
assert header[0] == "B"
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
newheader = header["AB*"]
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for an issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)])
assert len(header["DATE*"]) == 3
assert len(header["DATE?*"]) == 2
assert len(header["DATE-*"]) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header["AB*"] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header["AB*"] = "GH"
assert header[0] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header["AB*"] = ["H", "I"]
assert header[0] == "H"
assert header[2] == "I"
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
del header["AB*"]
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header(
[
("ABC", 0),
("HISTORY", 1),
("HISTORY", 2),
("DEF", 3),
("HISTORY", 4),
("HISTORY", 5),
]
)
assert header["HISTORY"] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([("A", "B"), ("C", "D")])
header.clear()
assert "A" not in header
assert "C" not in header
assert len(header) == 0
@pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header["FOO"] = "BAR"
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp("temp.fits"), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(["A", "B"])
assert "A" in header
assert header["A"] is None
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] is None
assert header.comments["B"] == ""
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(["A", "B"], "C")
assert "A" in header
assert header["A"] == "C"
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] == "C"
assert header.comments["B"] == ""
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(["A"], ("B", "C"))
assert "A" in header
assert header["A"] == "B"
assert header.comments["A"] == "C"
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(["A", "B", "A"], "C")
assert "A" in header
assert ("A", 0) in header
assert ("A", 1) in header
assert ("A", 2) not in header
assert header[0] == "C"
assert header["A"] == "C"
assert header[("A", 0)] == "C"
assert header[2] == "C"
assert header[("A", 1)] == "C"
def test_header_items(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header.items()) == [("A", "B"), ("C", "D")]
def test_header_iterkeys(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.values(), ["B", "D"]):
assert a == b
def test_header_keys(self):
with fits.open(self.data("arange.fits")) as hdul:
assert list(hdul[0].header) == [
"SIMPLE",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"NAXIS3",
"EXTEND",
]
def test_header_list_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
last = header.pop()
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop(1)
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop(0)
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
pytest.raises(TypeError, header.pop, "A", "B", "C")
last = header.pop("G")
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop("C")
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop("A")
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
default = header.pop("X", "Y")
assert default == "Y"
assert len(header) == 1
pytest.raises(KeyError, header.pop, "X")
def test_popitem(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.setdefault("A") == "B"
assert header.setdefault("C") == "D"
assert header.setdefault("E") == "F"
assert len(header) == 3
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
assert "G" in header
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update({"A": "E", "F": "G"})
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([("A", "B"), ("C", "D")])
header.update(A="E", F="G")
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update([("A", "E"), fits.Card("F", "G")])
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header["MYKEY"] = ("some val", "some comment")
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == "XTENSION"
assert hdu.header[-1] == "some val"
assert ("MYKEY", 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == "some val"
assert hdu.header[-1] == "some other val"
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu2.header["HISTORY"] = "history 1"
hdu2.header["HISTORY"] = "history 2"
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) in hdu.header
assert hdu.header[("MYKEY", 1)] == "some other val"
assert len(hdu.header["HISTORY"]) == 3
assert hdu.header[-1] == "history 2"
hdu = fits.PrimaryHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) not in hdu.header
assert hdu.header["MYKEY"] == "some other val"
assert len(hdu.header["HISTORY"]) == 2
assert hdu.header[-1] == "history 2"
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data("test0.fits"))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.count("A") == 1
assert header.count("C") == 1
assert header.count("E") == 1
header["HISTORY"] = "a"
header["HISTORY"] = "b"
assert header.count("HISTORY") == 2
pytest.raises(KeyError, header.count, "G")
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ""
assert header[-2] == ""
# New card should fill the first blank by default
header.append(("E", "F"))
assert len(header) == 4
assert header[-2] == "F"
assert header[-1] == ""
# This card should not use up a blank spot
header.append(("G", "H"), useblanks=False)
assert len(header) == 5
assert header[-1] == ""
assert header[-2] == "H"
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.append("E")
assert len(header) == 3
assert list(header)[-1] == "E"
assert header[-1] is None
assert header.comments["E"] == ""
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append("")
assert len(header) == 4
assert list(header)[-1] == ""
assert header[""] == ""
assert header.comments[""] == ""
def test_header_insert_use_blanks(self):
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ("E", "F"))
assert len(header) == 4
assert header[1] == "F"
assert header[-1] == ""
assert header[-2] == "D"
# Insert a new card without using blanks
header.insert(1, ("G", "H"), useblanks=False)
assert len(header) == 5
assert header[1] == "H"
assert header[-1] == ""
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header(
[("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")]
)
header.insert("NAXIS1", ("NAXIS", 2, "Number of axes"))
assert list(header.keys())[0] == "NAXIS"
assert header[0] == 2
assert header.comments[0] == "Number of axes"
header.insert("NAXIS1", ("NAXIS2", 20), after=True)
assert list(header.keys())[1] == "NAXIS1"
assert list(header.keys())[2] == "NAXIS2"
assert header[2] == 20
header.insert(("COMMENT", 1), ("COMMENT", "Comment 2"))
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"]
header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True)
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"]
header.insert(-1, ("TEST1", True))
assert list(header.keys())[-2] == "TEST1"
header.insert(-1, ("TEST2", True), after=True)
assert list(header.keys())[-1] == "TEST2"
assert list(header.keys())[-3] == "TEST1"
def test_remove(self):
header = fits.Header([("A", "B"), ("C", "D")])
# When keyword is present in the header it should be removed.
header.remove("C")
assert len(header) == 1
assert list(header) == ["A"]
assert "C" not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove("F")
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove("F", ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")])
header.remove("A", remove_all=True)
assert "A" not in header
assert len(header) == 1
assert list(header) == ["C"]
assert header[0] == "D"
def test_header_comments(self):
header = fits.Header([("A", "B", "C"), ("DEF", "G", "H")])
assert repr(header.comments) == " A C\n DEF H"
def test_comment_slices_and_filters(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
s = header.comments[1:]
assert list(s) == ["H", "K"]
s = header.comments[::-1]
assert list(s) == ["K", "H", "D"]
s = header.comments["A*"]
assert list(s) == ["D", "K"]
def test_comment_slice_filter_assign(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
header.comments[1:] = "L"
assert list(header.comments) == ["D", "L", "L"]
assert header.cards[header.index("AB")].comment == "D"
assert header.cards[header.index("EF")].comment == "L"
assert header.cards[header.index("AI")].comment == "L"
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ["L", "L", "D"]
header.comments["A*"] = ["M", "N"]
assert list(header.comments) == ["M", "L", "N"]
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header["HISTORY"] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header["HISTORY"][1:] == indices[1:]
assert header["HISTORY"][:3] == indices[:3]
assert header["HISTORY"][:6] == indices[:6]
assert header["HISTORY"][:-2] == indices[:-2]
assert header["HISTORY"][::-1] == indices[::-1]
assert header["HISTORY"][1::-1] == indices[1::-1]
assert header["HISTORY"][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ("A", "B", "C"))
header.append(("D", "E", "F"), end=True)
assert list(header["HISTORY"][1:]) == indices[1:]
assert list(header["HISTORY"][:3]) == indices[:3]
assert list(header["HISTORY"][:6]) == indices[:6]
assert list(header["HISTORY"][:-2]) == indices[:-2]
assert list(header["HISTORY"][::-1]) == indices[::-1]
assert list(header["HISTORY"][1::-1]) == indices[1::-1]
assert list(header["HISTORY"][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header["FOO"] = "BAR"
header["HISTORY"] = "ABC"
header["FRED"] = "BARNEY"
header["HISTORY"] = "DEF"
header["HISTORY"] = "GHI"
assert header["HISTORY"] == ["ABC", "DEF", "GHI"]
# Single value update
header["HISTORY"][0] = "FOO"
assert header["HISTORY"] == ["FOO", "DEF", "GHI"]
# Single value partial slice update
header["HISTORY"][1:] = "BAR"
assert header["HISTORY"] == ["FOO", "BAR", "BAR"]
# Multi-value update
header["HISTORY"][:] = ["BAZ", "QUX"]
assert header["HISTORY"] == ["BAZ", "QUX", "BAR"]
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header["HISTORY"] = "hello world"
header["HISTORY"] = "hello world"
header["COMMENT"] = "hello world"
assert header["HISTORY"] != header["COMMENT"]
header["COMMENT"] = "hello world"
assert header["HISTORY"] == header["COMMENT"]
def test_long_commentary_card(self):
header = fits.Header()
header["FOO"] = "BAR"
header["BAZ"] = "QUX"
longval = "ABC" * 30
header["HISTORY"] = longval
header["FRED"] = "BARNEY"
header["HISTORY"] = longval
assert len(header) == 7
assert list(header)[2] == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.set("HISTORY", longval, after="FOO")
assert len(header) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
header = fits.Header()
header.update({"FOO": "BAR"})
header.update({"BAZ": "QUX"})
longval = "ABC" * 30
header.add_history(longval)
header.update({"FRED": "BARNEY"})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.add_history(longval, after="FOO")
assert len(header.cards) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
def test_totxtfile(self, home_is_temp):
header_filename = self.temp("header.txt")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.totextfile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.totextfile(header_filename, overwrite=False)
hdul[0].header.totextfile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_tofile(self, home_is_temp):
"""
Repeat test_totxtfile, but with tofile()
"""
header_filename = self.temp("header.fits")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.tofile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.tofile(header_filename, overwrite=False)
hdul[0].header.tofile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711"""
filename = self.data("scale.fits")
hdr = fits.Header.fromfile(filename)
assert hdr["DATASET"] == "2MASS"
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header["A"] = ("B", "C")
header["B"] = ("C", "D")
header["C"] = ("D", "E")
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
f.write("\nEND")
new_header = fits.Header.fromtextfile(self.temp("test.hdr"))
assert "END" not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, "END", "")
pytest.raises(ValueError, header.append, "END")
pytest.raises(ValueError, header.append, "END", end=True)
pytest.raises(ValueError, header.insert, len(header), "END")
pytest.raises(ValueError, header.set, "END")
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep="", endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += " " * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header("END =", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header("END = ", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header("END$%&%^*%*", True)
with pytest.warns(
AstropyUserWarning,
match=r"Unexpected bytes trailing END keyword: '\$%&%\^\*%\*'",
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header("END", False)
with pytest.warns(
AstropyUserWarning, match="Missing padding to end of the FITS block"
) as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h["FOO"] = "BAR"
h["COMMENT"] = "hello"
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
out = f.read()
out = out.replace(b"hello", "héllo".encode("latin1"))
out = out.replace(b"BAR", "BÀR".encode("latin1"))
with open(self.temp("test2.fits"), "wb") as f2:
f2.write(out)
with pytest.warns(
AstropyUserWarning,
match="non-ASCII characters are present in the FITS file",
) as w:
h = fits.getheader(self.temp("test2.fits"))
assert h["FOO"] == "B?R"
assert h["COMMENT"] == "h?llo"
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")])
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after=0)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before="C")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after="A")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set("C", before=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("C", after=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep="\n")
# First the case that *does* work prior to fixing this issue
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep="\n")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h["FOCALLEN"] = 155.0
h["APERTURE"] = 0.0
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header["TEST"] = 5.0022221e-07
hdu.writeto(self.temp("test.fits"))
# Here we manually make the file invalid
with open(self.temp("test.fits"), "rb+") as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii("e"))
with fits.open(self.temp("test.fits")) as hdul, pytest.warns(
AstropyUserWarning
) as w:
hdul.writeto(self.temp("temp.fits"), output_verify="warn")
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
Python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad("FOO = T")
barimg = _pad("BAR = F")
h = fits.Header()
h["FOO"] = True
h["BAR"] = False
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h["FOO"] = np.bool_(True)
h["BAR"] = np.bool_(False)
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)])
assert list(h) == ["ABC", "DEF", "GEH"]
assert "abc" in h
assert "dEf" in h
assert h["geh"] == 3
# Case insensitivity of wildcards
assert len(h["g*"]) == 1
h["aBc"] = 2
assert h["abc"] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h["gEh"]
assert list(h) == ["ABC", "DEF"]
assert len(h) == 2
assert h.get("def") == 2
h.set("Abc", 3)
assert h["ABC"] == 3
h.set("gEh", 3, before="Abc")
assert list(h) == ["GEH", "ABC", "DEF"]
assert h.pop("abC") == 3
assert len(h) == 2
assert h.setdefault("def", 3) == 2
assert len(h) == 2
assert h.setdefault("aBc", 1) == 1
assert len(h) == 3
assert list(h) == ["GEH", "DEF", "ABC"]
h.update({"GeH": 1, "iJk": 4})
assert len(h) == 4
assert list(h) == ["GEH", "DEF", "ABC", "IJK"]
assert h["GEH"] == 1
assert h.count("ijk") == 1
assert h.index("ijk") == 3
h.remove("Def")
assert len(h) == 3
assert list(h) == ["GEH", "ABC", "IJK"]
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header["TESTKW"] = ("Test val", "This is the END")
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp("test.hdr"))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = "\u30a8\u30ea\u30c3\u30af"
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h["FOO"] = "BAR"
assert "FOO" in h
assert h["FOO"] == "BAR"
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, "BAR")
h["FOO"] = "BAZ"
assert h["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, "FOO", erikku)
h["FOO"] = ("BAR", "BAZ")
assert h["FOO"] == "BAR"
assert h.comments["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, "FOO", ("BAR", erikku))
pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ"))
pytest.raises(ValueError, assign, "FOO", (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
Although test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, it should not be
possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set("TEST", b"Hello")
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for its solution: optional stripping of whitespace from the end of a
header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h["FOO"] = "Bar "
assert h["FOO"] == "Bar"
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp("strip_header_whitespace", False):
assert h["FOO"] == "Bar "
assert h["QUX"] == "Bar "
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
assert h["FOO"] == "Bar"
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = [
"CCD parameters table ...",
" reference table oref$n951041ko_ccd.fits",
" INFLIGHT 12/07/2001 25/02/2002",
" all bias frames",
] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header["HISTORY"] = item
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"]
new_hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30")
c2 = fits.Card.fromstring("Just some random text.")
c3 = fits.Card.fromstring("A" * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert "CLFIND2D" in header
assert "Just som" in header
assert "AAAAAAAA" in header
assert header["CLFIND2D"] == ": contour = 0.30"
assert header["Just som"] == "e random text."
assert header["AAAAAAAA"] == "A" * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, "CLFIND2D", "foo")
pytest.raises(ValueError, header.set, "Just som", "foo")
pytest.raises(ValueError, header.set, "AAAAAAAA", "foo")
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
c.verify("fix")
assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6")
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, "TEST", float("nan"))
pytest.raises(ValueError, h.set, "TEST", np.nan)
pytest.raises(ValueError, h.set, "TEST", np.float32("nan"))
pytest.raises(ValueError, h.set, "TEST", float("inf"))
pytest.raises(ValueError, h.set, "TEST", np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header could
not be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([("TEST", True)])
h["TEST"] = 1
assert h["TEST"] is not True
assert isinstance(h["TEST"], int)
assert h["TEST"] == 1
h["TEST"] = np.bool_(True)
assert h["TEST"] is True
h["TEST"] = False
assert h["TEST"] is False
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
h["TEST"] = 0
assert h["TEST"] is not False
assert isinstance(h["TEST"], int)
assert h["TEST"] == 0
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h["TEST"] = 1
# int -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# int -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# Now the same tests but with zeros
h["TEST"] = 0
# int -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
# int -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, "HISTORY", "\n")
pytest.raises(ValueError, h.set, "HISTORY", "\nabc")
pytest.raises(ValueError, h.set, "HISTORY", "abc\n")
pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef")
# Each entry below is a separate (possibly invalid) card image; the ones
# containing '\n' must fail verification, the rest must pass.
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 ",
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 ",
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 ",
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 ",
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' ",
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv ",
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1",
"HISTORY 1-04T16:59:14 ",
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if "\n" in card_image:
pytest.raises(fits.VerifyError, c.verify, "exception")
else:
c.verify("exception")
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = "abc" * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(("history", value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == "HISTORY" and val == value
# Try adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data("test0.fits"), "rb") as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data("test0.fits"))
assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"]
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr["KEY2 "] = 2
hdr["KEY2 "] = 4
assert len(hdr) == 1
assert hdr["KEY2"] == 4
assert hdr["KEY2 "] == 4
def test_strip(self):
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr.strip()
assert set(hdr) == {"HISTORY", "FOO"}
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr = hdr.copy(strip=True)
assert set(hdr) == {"HISTORY", "FOO"}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring("KW = INF / Comment")
card.value = "FIXED"
assert tuple(card) == ("KW", "FIXED", "Comment")
card.verify("fix")
assert tuple(card) == ("KW", "FIXED", "Comment")
card = fits.Card.fromstring("KW = INF")
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp("bogus.fits"))
with fits.open(self.temp("bogus.fits")) as hdul:
hdul[0].header["KW"] = -1
hdul.writeto(self.temp("bogus_fixed.fits"))
with fits.open(self.temp("bogus_fixed.fits")) as hdul:
assert hdul[0].header["KW"] == -1
def test_index_numpy_int(self):
header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")])
idx = np.int8(2)
assert header[idx] == "BAR"
header[idx] = "BAZ"
assert header[idx] == "BAZ"
header.insert(idx, ("D", 42))
assert header[idx] == 42
header.add_comment("HELLO")
header.add_comment("WORLD")
assert header["COMMENT"][np.int64(1)] == "WORLD"
header.append(("C", "BAZBAZ"))
assert header[("C", np.int16(0))] == "BAZ"
assert header[("C", np.uint32(1))] == "BAZBAZ"
def test_header_data_size(self):
"""
Tests data size calculation (w/o padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header["BITPIX"] = 32
header["NAXIS"] = 2
header["NAXIS1"] = 100
header["NAXIS2"] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup_method(self):
super().setup_method()
self._test_header = fits.Header()
self._test_header.set("DP1", "NAXIS: 2")
self._test_header.set("DP1", "AXIS.1: 1")
self._test_header.set("DP1", "AXIS.2: 2")
self._test_header.set("DP1", "NAUX: 2")
self._test_header.set("DP1", "AUX.1.COEFF.0: 0")
self._test_header.set("DP1", "AUX.1.POWER.0: 1")
self._test_header.set("DP1", "AUX.1.COEFF.1: 0.00048828125")
self._test_header.set("DP1", "AUX.1.POWER.1: 1")
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
assert c.comment == "A comment"
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.1
assert c.field_specifier == "NAXIS"
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1", "NAXIS: 2")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: 2.0")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: a")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1.NAXIS", 2)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card("DP1.NAXIS", "a")
assert c.keyword == "DP1.NAXIS"
assert c.value == "a"
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.comment == "A comment"
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
c.field_specifier = "NAXIS1"
assert c.field_specifier == "NAXIS1"
assert c.keyword == "DP1.NAXIS1"
assert c.value == 2.0
assert c.comment == "A comment"
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set("abc.def", 1)
header.set("abc.DEF", 2)
assert header["abc.def"] == 1
assert header["ABC.def"] == 1
assert header["aBc.def"] == 1
assert header["ABC.DEF"] == 2
assert "ABC.dEf" not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header["DP1"] == "NAXIS: 2"
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header["DP1.NAXIS"] == 2.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
assert self._test_header["DP1.AUX.1.COEFF.1"] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header["DP1.AXIS.3"]
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header["DP1.NAXIS"] == 3.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
self._test_header["DP1.AXIS.1"] = 1.1
assert self._test_header["DP1.AXIS.1"] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h["D2IM1.EXTVER"] = 1
assert h["D2IM1.EXTVER"] == 1.0
h["D2IM1.EXTVER"] = 2
assert h["D2IM1.EXTVER"] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2"
c = fits.Card("DP1.NAXIS", 2)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set("DP1", "AXIS.3: 1", "a comment", after="DP1.AXIS.2")
assert self._test_header[3] == 1
assert self._test_header["DP1.AXIS.3"] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header["DP1.AXIS.1"]
assert len(self._test_header) == 7
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.AXIS.2"
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header["DP1.AXIS.2"]
assert len(self._test_header) == 6
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header["DP1.AXIS.*"]
assert isinstance(cl, fits.Header)
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
cl = self._test_header["DP1.N*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'",
]
cl = self._test_header["DP1.AUX..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl = self._test_header["DP?.NAXIS"]
assert [str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"]
cl = self._test_header["DP1.A*S.*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header["DP1.A*..."]
assert len(self._test_header) == 2
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header["DP1.A*..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl2 = cl["*.*AUX..."]
assert [str(c).strip() for c in cl2.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl) == ["DP1.AXIS.1", "DP1.AXIS.2"]
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header["DP1.AXIS.*"]
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h["HISTORY"] = "AXIS.1: 2"
h["HISTORY"] = "AXIS.2: 2"
assert "HISTORY.AXIS" not in h
assert "HISTORY.AXIS.1" not in h
assert "HISTORY.AXIS.2" not in h
assert h["HISTORY"] == ["AXIS.1: 2", "AXIS.2: 2"]
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h["HISTORY"] = "Date: 2012-09-19T13:58:53.756061"
assert "HISTORY.Date" not in h
assert str(h.cards[0]) == _pad("HISTORY Date: 2012-09-19T13:58:53.756061")
c = fits.Card.fromstring(" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ""
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h["FOO"] = "Date: 2012-09-19T13:58:53.756061"
assert "FOO.Date" not in h
assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h["FOO"] == "AXIS.1: 2"
assert h[("FOO", 1)] == "AXIS.2: 4"
assert h["FOO.AXIS.1"] == 2.0
assert h["FOO.AXIS.2"] == 4.0
assert "FOO.AXIS" not in h
assert "FOO.AXIS." not in h
assert "FOO." not in h
pytest.raises(KeyError, lambda: h["FOO.AXIS"])
pytest.raises(KeyError, lambda: h["FOO.AXIS."])
pytest.raises(KeyError, lambda: h["FOO."])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data("zerowidth.fits"))
output = hf.parse(extensions=["AIPS FQ"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split("\n")) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1], keywords=["EXTNAME", "BITPIX"])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split("\n")) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=["NAXIS*"])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data("test0.fits"))
assert "EXTNAME = 'SCI" in hf.parse(extensions=["SCI,2"])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data("comp.fits"))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True)
hf.close()
def test_fitsheader_compressed_from_primary_image_ext(self):
"""Regression test for issue https://github.com/astropy/astropy/issues/7312"""
data = np.arange(2 * 2, dtype=np.int8).reshape((2, 2))
phdu = fits.PrimaryHDU(data=data)
chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header)
chdu.writeto(self.temp("tmp2.fits"), overwrite=True)
with fits.open(self.temp("tmp2.fits")) as hdul:
assert "XTENSION" not in hdul[1].header
assert "PCOUNT" not in hdul[1].header
assert "GCOUNT" not in hdul[1].header
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data("zerowidth.fits")
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=["AIPS FQ", 2, "4"])
assert len(mytable) == (
len(fitsobj["AIPS FQ"].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header)
)
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=["AIPS FQ"])
assert np.all(mytable["filename"] == test_filename)
assert np.all(mytable["hdu"] == "AIPS FQ")
assert mytable["value"][mytable["keyword"] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert len(mytable) == 1
assert mytable["hdu"][0] == "AIPS FQ"
assert mytable["keyword"][0] == "EXTNAME"
assert mytable["value"][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=["DOES_NOT_EXIST"])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["DOES_NOT_EXIST"])
assert mytable is None
formatter.close()
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_hdu_writeto_mode(self, mode):
with open(self.temp("mode.fits"), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
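

def _example_rvkc_access():
    # A minimal, self-contained sketch (not collected by pytest) of the
    # record-valued keyword convention exercised by the tests above. It only
    # assumes the public astropy.io.fits Header API used throughout this module.
    from astropy.io import fits

    hdr = fits.Header()
    hdr.set("DP1", "NAXIS: 2")   # stored as a record-valued keyword card
    hdr.set("DP1", "AXIS.1: 1")
    # Keyword-only lookup returns the raw value string; the full
    # keyword.field-specifier form returns the parsed float.
    assert hdr["DP1"] == "NAXIS: 2"
    assert hdr["DP1.NAXIS"] == 2.0
    assert hdr["DP1.AXIS.1"] == 1.0
    return hdr
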
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ("no comment",)
return super().append(card, *args, **kwargs)
my_header = MyHeader(
(
("a", 1.0, "first"),
("b", 2.0, "second"),
(
"c",
3.0,
),
)
)
assert my_header.comments["a"] == "first"
assert my_header.comments["b"] == "second"
assert my_header.comments["c"] == "no comment"
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments["b"] == "second"
assert slice_.comments["c"] == "no comment"
selection = my_header["c*"]
assert type(selection) is MyHeader
assert selection.comments["c"] == "no comment"
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments["b"] == "second"
assert copy_.comments["c"] == "no comment"
my_header.extend((("d", 4.0),))
assert my_header.comments["d"] == "no comment"
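

def _example_header_subclass():
    # Hedged sketch distilling test_subclass above: slicing, filtering, and
    # copying a Header subclass should all return that same subclass. The
    # class name below is made up for illustration.
    from astropy.io import fits

    class VerboseHeader(fits.Header):
        pass

    h = VerboseHeader([("A", 1), ("B", 2)])
    assert type(h[1:]) is VerboseHeader
    assert type(h["B*"]) is VerboseHeader
    assert type(h.copy()) is VerboseHeader
    return h
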
|
85383c5ec3c31e1660c64001bce69a155cffde75ca4df6b39e7c013614d61270 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
import pytest
from astropy import __version__ as version
from astropy.io import fits
from astropy.io.fits import FITSDiff, HDUList, Header, ImageHDU
from astropy.io.fits.convenience import writeto
from astropy.io.fits.hdu import PrimaryHDU, hdulist
from astropy.io.fits.scripts import fitsdiff
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
class TestFITSDiff_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["-h"])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["--version"])
out = capsys.readouterr()[0]
assert out == f"fitsdiff {version}"
assert e.value.code == 0
def test_noargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main([""])
assert e.value.code == 2
def test_oneargargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["file1"])
assert e.value.code == 2
def test_nodiff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
def test_onediff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
def test_manydiff(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a + 1
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
" a> 9",
" b> 10",
" ...",
" 100 different pixels found (100.00% different).",
]
numdiff = fitsdiff.main(["-n", "1", tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
" a> 0",
" b> 1",
" ...",
" 100 different pixels found (100.00% different).",
]
def test_outputfile(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-o", self.temp("diff.txt"), tmp_a, tmp_b])
assert numdiff == 1
with open(self.temp("diff.txt")) as f:
out = f.read()
assert out.splitlines()[-4:] == [
" Data differs at [1, 2]:",
" a> 10",
" b> 12",
" 1 different pixels found (1.00% different).",
]
def test_atol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b])
assert numdiff == 0
numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b])
assert numdiff == 1
def test_rtol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b])
assert numdiff == 0
def test_rtol_diff(self, capsys):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b])
assert numdiff == 1
out, err = capsys.readouterr()
assert (
out
== f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
Primary HDU:
Data contains differences:
Data differs at [1, 2]:
a> 10.0
? ^
b> 11.0
? ^
1 different pixels found (1.00% different).
"""
)
assert err == ""
def test_wildcard(self):
tmp1 = self.temp("tmp_file1")
with pytest.raises(SystemExit) as e:
fitsdiff.main([tmp1 + "*", "ACME"])
assert e.value.code == 2
def test_not_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert (
out
== f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.0, Absolute tolerance: 0.0
No differences found.
"""
)
assert err == ""
def test_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-q", tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
@pytest.mark.slow
def test_path(self, capsys):
os.mkdir(self.temp("sub/"))
tmp_b = self.temp("sub/ascii.fits")
tmp_g = self.temp("sub/group.fits")
tmp_h = self.data("group.fits")
with hdulist.fitsopen(tmp_h) as hdu_b:
hdu_b.writeto(tmp_g)
writeto(tmp_b, np.arange(100).reshape(10, 10))
# one modified file and a directory
assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1
assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1
# two directories
tmp_d = self.temp("sub/")
assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1
assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1
with pytest.warns(
UserWarning,
match=r"Field 'ORBPARM' has a repeat count of 0 in its format code",
):
assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == 0
# no match
tmp_c = self.data("arange.fits")
fitsdiff.main([tmp_c, tmp_d])
out, err = capsys.readouterr()
assert "'arange.fits' has no match in" in err
# globbing
with pytest.warns(
UserWarning,
match=r"Field 'ORBPARM' has a repeat count of 0 in its format code",
):
assert fitsdiff.main(["-q", self.data_dir + "/*.fits", self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir + "/g*.fits", tmp_d]) == 0
# one file and a directory
tmp_f = self.data("tb.fits")
assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([("A", 1), ("B", 2), ("C", 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name="SCI")
ihdu_b = ImageHDU(data=b, name="SCI")
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
def test_ignore_hdus_report(self, capsys):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([("A", 1), ("B", 2), ("C", 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name="SCI")
ihdu_b = ImageHDU(data=b, name="SCI")
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
out, err = capsys.readouterr()
assert "testa.fits" in out
assert "testb.fits" in out
@pytest.mark.skip(reason="fails intentionally to show open files (see PR #10159)")
def test_fitsdiff_openfile(tmp_path):
"""Make sure that failing FITSDiff doesn't leave open files."""
path1 = tmp_path / "file1.fits"
path2 = tmp_path / "file2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert diff.identical, diff.report()
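

def _example_fitsdiff_api(workdir):
    # A hedged sketch of the programmatic FITSDiff API that the command-line
    # tests in this module drive through fitsdiff.main(). ``workdir`` is any
    # writable pathlib directory (e.g. a pytest tmp_path); file names are
    # illustrative.
    import numpy as np
    from astropy.io import fits

    hdu_a = fits.PrimaryHDU(data=np.arange(100).reshape(10, 10))
    hdu_b = fits.PrimaryHDU(data=hdu_a.data.copy())
    hdu_b.data[1, 0] = 12
    path_a = workdir / "a.fits"
    path_b = workdir / "b.fits"
    hdu_a.writeto(path_a)
    hdu_b.writeto(path_b)
    diff = fits.FITSDiff(path_a, path_b)
    assert not diff.identical
    return diff.report()  # human-readable summary of the differences
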
class Test_FITSDiff(FitsTestCase):
def test_FITSDiff_report(self, home_is_temp):
self.copy_file("test0.fits")
fits.setval(self.temp("test0.fits"), "TESTKEY", value="testval")
d = FITSDiff(self.data("test0.fits"), self.temp("test0.fits"))
assert not d.identical
d.report(self.temp("diff_report.txt"))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
d.report(self.temp("diff_report.txt"), overwrite=False)
d.report(self.temp("diff_report.txt"), overwrite=True)
with open(os.path.expanduser(self.temp("diff_report.txt"))) as f:
assert "Extra keyword 'TESTKEY' in b: 'testval'" in f.read()
|
97d1ae3bf63d2090fd4b500e6c30f50c59fb701ba799a919cb3d02af65a6d891 | import gc
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.io import fits
from astropy.io.fits import (
BinTableHDU,
HDUList,
ImageHDU,
PrimaryHDU,
connect,
table_to_hdu,
)
from astropy.io.fits.column import (
_fortran_to_python_format,
_parse_tdisp_format,
python_to_tdisp,
)
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
from astropy.table import Column, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ["value", "shape", "format", "scale", "location"]
compare_attrs = {
name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()
}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {
name: col
for name, col in mixin_cols.items()
if (
isinstance(col, Time)
and col.location.shape != ()
or isinstance(col, np.ndarray)
and col.dtype.kind == "O"
or isinstance(col, u.LogQuantity)
)
}
mixin_cols = {
name: col for name, col in mixin_cols.items() if name not in unsupported_cols
}
def equal_data(a, b):
return all(np.all(a[name] == b[name]) for name in a.dtype.names)
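

def _example_table_roundtrip(workdir):
    # Minimal sketch of the basic Table <-> FITS round trip that the classes
    # below exercise in detail; ``workdir`` is any writable pathlib directory
    # and the file name is illustrative.
    from astropy import units as u
    from astropy.table import Table

    t = Table({"a": [1, 2, 3]})
    t["a"].unit = u.m
    filename = workdir / "example.fits"
    t.write(filename, format="fits")
    t2 = Table.read(filename)
    assert equal_data(t, t2)
    assert t2["a"].unit == u.m
    return t2
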
class TestSingleTable:
def setup_class(self):
self.data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
def test_simple(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmp_path):
filename = tmp_path / "test_simple.fit"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["A"] = 1
t1.meta["B"] = 2.3
t1.meta["C"] = "spam"
t1.meta["comments"] = ["this", "is", "a", "long", "comment"]
t1.meta["HISTORY"] = ["first", "second", "third"]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta, list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["ttype1"] = "spam"
with pytest.warns(
AstropyUserWarning,
match=(
"Meta-data keyword ttype1 "
"will be ignored since it conflicts with a FITS "
"reserved keyword"
),
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmp_path):
"""
Test that file type is recognized without extension
"""
filename = tmp_path / "test_simple"
t1 = Table(self.data)
t1.write(filename, overwrite=True, format="fits")
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_units(self, table_type, tmp_path):
filename = tmp_path / "test_with_units.fits"
t1 = table_type(self.data)
t1["a"].unit = u.m
t1["c"].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].unit == u.m
assert t2["c"].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmp_path):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = tmp_path / "test_with_units.fits"
unit = u.def_unit("bandpass_sol_lum")
t = QTable()
t["l"] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert "bandpass_sol_lum" in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(
u.UnitsWarning, match="'bandpass_sol_lum' did not parse"
) as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2["l"].unit, u.UnrecognizedUnit)
assert str(t2["l"].unit) == "bandpass_sol_lum"
assert np.all(t2["l"].value == t["l"].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3["l"].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({"bandpass_sol_lum": u.Lsun}):
t3 = QTable.read(filename)
assert t3["l"].unit is u.Lsun
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "Angstroms"
hdu.columns[2].unit = "ergs/(cm.s.Angstroms)"
with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)):
t = table_type.read(hdu)
assert t["a"].unit == u.AA
assert t["c"].unit == u.erg / (u.cm * u.s * u.AA)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_format(self, table_type, tmp_path):
filename = tmp_path / "test_with_format.fits"
t1 = table_type(self.data)
t1["a"].format = "{:5d}"
t1["b"].format = "{:>20}"
t1["c"].format = "{:6.2f}"
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].format == "{:5d}"
assert t2["b"].format == "{:>20}"
assert t2["c"].format == "{:6.2f}"
def test_masked(self, tmp_path):
filename = tmp_path / "test_masked.fits"
t1 = Table(self.data, masked=True)
t1.mask["a"] = [1, 0, 1, 0]
t1.mask["b"] = [1, 0, 0, 1]
t1.mask["c"] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
@pytest.mark.parametrize("masked", [True, False])
def test_masked_nan(self, masked, tmp_path):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype="f4")
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=["a", "b", "c"], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2["b"].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_masked_serialize_data_mask(self, tmp_path):
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=["a", "b", "c"])
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2["b"].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_read_from_fileobj(self, tmp_path):
filename = tmp_path / "test_read_from_fileobj.fits"
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, "rb") as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "RADIANS"
hdu.columns[1].unit = "spam"
hdu.columns[2].unit = "millieggs"
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmp_path):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = tmp_path / "test_nonstandard_units.fits"
spam = u.def_unit("spam")
t = table_type()
t["a"] = [1.0, 2.0, 3.0] * spam
with pytest.warns(AstropyUserWarning, match="spam") as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert "cannot be recovered in reading. " in str(w[0].message)
else:
assert "lost to non-astropy fits readers" in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert "TUNIT1" not in hdu.header
def test_memmap(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize("memmap", (False, True))
def test_character_as_bytes(self, tmp_path, memmap):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2["b"].dtype.kind == "U"
assert t3["b"].dtype.kind == "S"
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmp_path):
filename = tmp_path / "test_oned_single_element.fits"
table = Table({"x": [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read["x"].shape == (2, 1)
assert len(read["x"][0]) == 1
def test_write_append(self, tmp_path):
t = Table(self.data)
hdu = table_to_hdu(t)
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = tmp_path / "test_write_append.fits"
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmp_path):
t = Table(self.data)
filename = tmp_path / "test_write_overwrite.fits"
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmp_path):
filename = tmp_path / "test_inexact_format_parse_on_read.fits"
c1 = fits.Column(name="a", array=np.array([1, 2, np.nan]), format="E")
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
# using memmap also deactivate the masking
tab = Table.read(filename, memmap=True)
assert tab.mask is None
def test_mask_null_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a",
array=np.array([1, 2, 99, 60000], dtype="u2"),
format="I",
null=99,
bzero=32768,
)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_str_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a", array=np.array([b"foo", b"bar", b""], dtype="|S3"), format="A3"
)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
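

def _example_read_named_hdu(workdir):
    # Hedged sketch of the ``hdu=`` selection keyword that TestMultipleHDU
    # below exercises; the EXTNAME and file name are illustrative.
    import numpy as np
    from astropy.io import fits
    from astropy.table import Table

    data = np.array([(1, 2.0)], dtype=[("a", int), ("b", float)])
    hdus = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU(data, name="FIRST")])
    filename = workdir / "multi.fits"
    hdus.writeto(filename)
    t = Table.read(filename, hdu="FIRST")  # select the table by EXTNAME
    assert t.colnames == ["a", "b"]
    return t
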
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
self.data2 = np.array(
list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])),
dtype=[("p", float), ("q", float)],
)
self.data3 = np.array(
list(zip([1, 2, 3, 4], [2.3, 4.5, 6.7, 8.9])),
dtype=[("A", int), ("B", float)],
)
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name="first")
hdu2 = BinTableHDU(self.data2, name="second")
hdu3 = ImageHDU(np.ones((3, 3)), name="third")
hdu4 = BinTableHDU(self.data3)
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings("always")
def test_read(self, tmp_path):
filename = tmp_path / "test_read.fits"
self.hdus.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = tmp_path / "test_read_2.fits"
self.hdusb.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_0.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_with_hdu_1(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_1.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_with_hdu_2(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_2.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_with_hdu_3(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_3.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_4.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_with_hdu_missing(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_1.fits"
self.hdus1.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_with_hdu_warning(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_2.fits"
self.hdus2.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_in_last_hdu(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_3.fits"
self.hdus3.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first", None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize("hdu", [None, 1, "first"])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
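

def _example_read_from_hdulist():
    # Sketch of reading a Table directly from an in-memory HDUList (no file
    # on disk), mirroring the test_read_from_hdulist cases above.
    import numpy as np
    from astropy.io import fits
    from astropy.table import Table

    data = np.array([(1, 2.0)], dtype=[("x", int), ("y", float)])
    hdus = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU(data)])
    t = Table.read(hdus, hdu=1)
    assert t.colnames == ["x", "y"]
    return t
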
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename("data/tb.fits"))
assert np.all(t["c1"].mask == np.array([False, False]))
assert not hasattr(t["c2"], "mask")
assert not hasattr(t["c3"], "mask")
assert not hasattr(t["c4"], "mask")
assert np.all(t["c1"].data == np.array([1, 2]))
assert np.all(t["c2"].data == np.array([b"abc", b"xy "]))
assert_allclose(t["c3"].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t["c4"].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ["x", "y", "z"]
t = Table([a, b, c], names=("a", "b", "c"), meta={"name": "first table"})
t["a"].unit = "1.2"
with pytest.raises(
UnitScaleError,
match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\.",
):
t.write("t.fits", format="fits", overwrite=True)
@pytest.mark.parametrize(
"tdisp_str, format_return",
[
("EN10.5", ("EN", "10", "5", None)),
("F6.2", ("F", "6", "2", None)),
("B5.10", ("B", "5", "10", None)),
("E10.5E3", ("E", "10", "5", "3")),
("A21", ("A", "21", None, None)),
],
)
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize(
"tdisp_str, format_str_return",
[
("G15.4E2", "{:15.4g}"),
("Z5.10", "{:5x}"),
("I6.5", "{:6d}"),
("L8", "{:>8}"),
("E20.7", "{:20.7e}"),
],
)
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize(
"fmt_str, tdisp_str",
[
("{:3d}", "I3"),
("3d", "I3"),
("7.3f", "F7.3"),
("{:>4}", "A4"),
("{:7.4f}", "F7.4"),
("%5.3g", "G5.3"),
("%10s", "A10"),
("%.4f", "F13.4"),
],
)
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
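

def _example_tdisp_helpers():
    # Short illustration of the TDISP conversion helpers tested above. These
    # are internal astropy.io.fits.column functions (note the leading
    # underscore on one of them), so treat this as a sketch, not stable API.
    from astropy.io.fits.column import _fortran_to_python_format, python_to_tdisp

    assert python_to_tdisp("{:7.4f}") == "F7.4"              # Python -> FITS TDISPn
    assert _fortran_to_python_format("E20.7") == "{:20.7e}"  # FITS TDISPn -> Python
    return True
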
def test_logical_python_to_tdisp():
assert python_to_tdisp("{:>7}", logical_dtype=True) == "L7"
def test_bool_column(tmp_path):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
arr[::2] = False  # set every other element to False ('==' here would be a no-op)
t = Table([arr])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert hdul[1].data["col0"].dtype == np.dtype("bool")
assert np.all(hdul[1].data["col0"] == arr)
def test_unicode_column(tmp_path):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(["a", "b", "cd"])])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert np.all(hdul[1].data["col0"] == ["a", "b", "cd"])
assert hdul[1].header["TFORM1"] == "2A"
t2 = Table([np.array(["\N{SNOWMAN}"])])
with pytest.raises(UnicodeEncodeError):
t2.write(tmp_path / "test.fits", overwrite=True)
def test_unit_warnings_read_write(tmp_path):
filename = tmp_path / "test_unit.fits"
t1 = Table([[1, 2], [3, 4]], names=["a", "b"])
t1["a"].unit = "m/s"
t1["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention():
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename("data/stddata.fits")
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables are present",
):
t = Table.read(filename)
assert t.meta["comments"] == [
"",
" *** End of mandatory fields ***",
"",
"",
" *** Column names ***",
"",
"",
" *** Column formats ***",
"",
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of empty OrderedDict(), #6720 would
# fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# FITS does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting="safe")
else:
assert np.all(a1 == a2)
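

def _example_time_mixin_roundtrip(workdir):
    # Hedged sketch of the mixin-column round trip checked below, using a Time
    # column; astropy_native=True asks the reader to rebuild the Time object
    # instead of returning the raw serialized columns. Paths are illustrative.
    from astropy.table import QTable
    from astropy.time import Time

    t = QTable({"tm": Time(["2000-01-01", "2000-01-02"], scale="utc")})
    filename = workdir / "time.fits"
    t.write(filename, format="fits")
    t2 = QTable.read(filename, format="fits", astropy_native=True)
    assert isinstance(t2["tm"], Time)
    assert len(t2["tm"]) == 2
    return t2
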
def test_fits_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="fits")
t2 = Table.read(filename, format="fits", astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time) else serialized_names[name]
)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["HISTORY"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.fits"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my \n\n\n description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize("name_col", unsupported_cols.items())
@pytest.mark.xfail(reason="column type unsupported")
def test_fits_unsupported_mixin(self, name_col, tmp_path):
# Check that we actually fail in writing unsupported columns defined
# on top.
filename = tmp_path / "test_simple.fits"
name, col = name_col
Table([col], names=[name]).write(filename, format="fits")
def test_info_attributes_with_no_mixins(tmp_path):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = tmp_path / "test.fits"
t = Table([[1.0, 2.0]])
t["col0"].description = "hello" * 40
t["col0"].format = "{:8.4f}"
t["col0"].meta["a"] = {"b": "c"}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2["col0"].description == "hello" * 40
assert t2["col0"].format == "{:8.4f}"
assert t2["col0"].meta["a"] == {"b": "c"}
@pytest.mark.parametrize("method", ["set_cols", "names", "class"])
def test_round_trip_masked_table_serialize_mask(tmp_path, method):
"""
Same as the previous test, but set serialize_method to 'data_mask' so the mask
is written out and everything round-trips correctly.
"""
filename = tmp_path / "test.fits"
t = simple_table(masked=True) # int, float, and str cols with one masked element
# MaskedColumn but no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for why we test a column with no masked elements.
t["d"] = [1, 2, 3]
if method == "set_cols":
for col in t.itercols():
col.info.serialize_method["fits"] = "data_mask"
t.write(filename)
elif method == "names":
t.write(
filename,
serialize_method={
"a": "data_mask",
"b": "data_mask",
"c": "data_mask",
"d": "data_mask",
},
)
elif method == "class":
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
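

def _example_serialize_mask(workdir):
    # Compact sketch of serialize_method='data_mask', which writes both the
    # data and the mask so masked values round-trip exactly (the test above
    # checks the same behavior through three equivalent spellings). The file
    # name is illustrative.
    import numpy as np
    from astropy.table import MaskedColumn, Table

    t = Table({"a": MaskedColumn([1, 2, 3], mask=[False, True, False])})
    filename = workdir / "masked.fits"
    t.write(filename, serialize_method="data_mask")
    t2 = Table.read(filename)
    assert np.all(t2["a"].mask == t["a"].mask)
    return t2
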
def test_meta_not_modified(tmp_path):
filename = tmp_path / "test.fits"
t = Table(data=[Column([1, 2], "a", description="spam")])
t.meta["comments"] = ["a", "b"]
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta["comments"] == ["a", "b"]
def test_is_fits_gh_14305():
"""Regression test for https://github.com/astropy/astropy/issues/14305"""
assert not connect.is_fits("", "foo.bar", None)
|
66095e7d2f580c86a51559446e568428919b09fa1d9194596400e9e9b04bbeef | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import errno
import gzip
import io
import mmap
import os
import pathlib
import shutil
import sys
import urllib.request
import zipfile
from unittest.mock import patch
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.convenience import _getext
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.file import GZIP_MAGIC, _File
from astropy.io.tests import safeio
from astropy.utils import data
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import conf
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
if HAS_BZ2:
import bz2
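

def _example_verify_fix():
    # Standalone sketch (not part of TestCore) of the verify() machinery
    # exercised further down: the lowercase keyword below is a fixable defect
    # that 'silentfix' repairs without producing any output.
    from astropy.io import fits

    hdu = fits.ImageHDU()
    hdu.header.append(fits.Card.fromstring("test = 'spam'"))  # not upper case
    hdu.verify("silentfix")
    assert "TEST" in hdu.header  # Header lookup is case-insensitive
    return hdu.header
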
class TestCore(FitsTestCase):
def test_missing_file(self):
with pytest.raises(OSError):
fits.open(self.temp("does-not-exist.fits"))
def test_naxisj_check(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist:
hdulist[1].header["NAXIS3"] = 500
assert "NAXIS3" in hdulist[1].header
hdulist.verify("silentfix")
assert "NAXIS3" not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
lst = fits.HDUList()
n = np.array([1, 60000, 0], dtype="u2").astype("i2")
c = fits.Column(name="foo", format="i2", bscale=1, bzero=32768, array=n)
t = fits.BinTableHDU.from_columns([c])
lst.append(p)
lst.append(t)
lst.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as p:
assert p[1].data[1]["foo"] == 60000.0
def test_fits_file_path_object(self):
"""
Testing when a fits file is passed as a pathlib.Path object #4412.
"""
fpath = pathlib.Path(self.data("tdim.fits"))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_pathlike_object(self):
"""
Testing when a fits file is passed as an os.PathLike object #11579.
"""
class TPath(os.PathLike):
def __init__(self, path):
self.path = path
def __fspath__(self):
return str(self.path)
fpath = TPath(self.data("tdim.fits"))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_file_bytes_object(self):
"""
Testing when a fits file is passed as bytes.
"""
with fits.open(self.data("tdim.fits").encode()) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name="FOO", format="3J"))
p.add_col(fits.Column(name="BAR", format="1I"))
assert p.names == ["FOO", "BAR"]
p.del_col("FOO")
assert p.names == ["BAR"]
def test_add_del_columns2(self):
hdulist = fits.open(self.data("tb.fits"))
table = hdulist[1]
assert table.data.dtype.names == ("c1", "c2", "c3", "c4")
assert table.columns.names == ["c1", "c2", "c3", "c4"]
table.columns.del_col("c1")
assert table.data.dtype.names == ("c2", "c3", "c4")
assert table.columns.names == ["c2", "c3", "c4"]
table.columns.del_col("c3")
assert table.data.dtype.names == ("c2", "c4")
assert table.columns.names == ["c2", "c4"]
table.columns.add_col(fits.Column("foo", "3J"))
assert table.data.dtype.names == ("c2", "c4", "foo")
assert table.columns.names == ["c2", "c4", "foo"]
hdulist.writeto(self.temp("test.fits"), overwrite=True)
hdulist.close()
# NOTE: If you see a warning, might be related to
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp("test.fits")) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ("c2", "c4", "foo")
assert table.columns.names == ["c2", "c4", "foo"]
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = "number of bits per data pixel"
header["BITPIX"] = (16, comment)
assert "BITPIX" in header
assert header["BITPIX"] == 16
assert header.comments["BITPIX"] == comment
header.update(BITPIX=32)
assert header["BITPIX"] == 32
assert header.comments["BITPIX"] == ""
def test_set_card_value(self):
"""Similar to test_update_header_card(), but tests the the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = "number of bits per data pixel"
card = fits.Card.fromstring(f"BITPIX = 32 / {comment}")
header.append(card)
header["BITPIX"] = 32
assert "BITPIX" in header
assert header["BITPIX"] == 32
assert header.cards[0].keyword == "BITPIX"
assert header.cards[0].value == 32
assert header.cards[0].comment == comment
def test_uint(self):
filename = self.data("o4sp040b0_raw.fits")
with fits.open(filename, uint=False) as hdulist_f:
with fits.open(filename, uint=True) as hdulist_i:
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
errs = hdu.req_cards("TESTKW", None, None, "foo", "silentfix", [])
assert len(errs) == 1
assert "TESTKW" in hdu.header
assert hdu.header["TESTKW"] == "foo"
assert hdu.header.cards[-1].keyword == "TESTKW"
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header["TESTKW"] = "foo"
errs = hdu.req_cards("TESTKW", None, lambda v: v == "foo", "foo", "ignore", [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards(
"TESTKW", None, lambda v: v == "bar", "bar", "exception", []
)
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards("TESTKW", None, lambda v: v == "bar", "bar", "silentfix", [])
assert hdu.header["TESTKW"] == "bar"
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option="warn"):
errs = super()._verify(option)
hdu.req_cards("TESTKW", None, None, None, "fix", errs)
return errs
@classmethod
def match_header(cls, header):
# Since creating this HDU class adds it to the registry we
# don't want the file reader to possibly think any actual
# HDU from a file should be handled by this class
return False
hdu = TestHDU(header=fits.Header())
with pytest.raises(fits.VerifyError):
hdu.verify("fix")
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header["XTENSION"]
with pytest.raises(fits.VerifyError):
hdu.verify("exception")
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header["NAXIS"]
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
hdu.verify("ignore")
# Make sure the error wasn't fixed either, silently or otherwise
assert "NAXIS" not in hdu.header
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
with pytest.raises(ValueError):
hdu.verify("foobarbaz")
def test_errlist_basic(self):
# Just some tests to make sure that _ErrList is setup correctly.
# No arguments
error_list = fits.verify._ErrList()
assert error_list == []
        # Some contents - this does not actually validate them, it just makes
        # sure they are kept.
error_list = fits.verify._ErrList([1, 2, 3])
assert error_list == [1, 2, 3]
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
hdu.verify("silentfix+ignore")
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match="Illegal keyword name") as w:
hdu.verify("silentfix+warn")
assert len(w) == 4
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo:
hdu.verify("silentfix+exception")
assert "not upper case" not in str(excinfo.value)
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match="not upper case") as w:
hdu.verify("fix+ignore")
assert len(w) == 4
# fix+warn
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning) as w:
hdu.verify("fix+warn")
assert len(w) == 6
assert "not upper case" in str(w[2].message)
assert "Illegal keyword name" in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo:
hdu.verify("fix+exception")
assert "not upper case" in str(excinfo.value)
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
filename = self.data("test0.fits")
hl, ext = _getext(filename, "readonly", 1)
assert ext == 1
hl.close()
pytest.raises(ValueError, _getext, filename, "readonly", 1, 2)
pytest.raises(ValueError, _getext, filename, "readonly", (1, 2))
pytest.raises(ValueError, _getext, filename, "readonly", "sci", "sci")
pytest.raises(TypeError, _getext, filename, "readonly", 1, 2, 3)
hl, ext = _getext(filename, "readonly", ext=1)
assert ext == 1
hl.close()
hl, ext = _getext(filename, "readonly", ext=("sci", 2))
assert ext == ("sci", 2)
hl.close()
pytest.raises(
TypeError, _getext, filename, "readonly", 1, ext=("sci", 2), extver=3
)
pytest.raises(
TypeError, _getext, filename, "readonly", ext=("sci", 2), extver=3
)
hl, ext = _getext(filename, "readonly", "sci")
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", "sci", 1)
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", ("sci", 1))
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(
filename, "readonly", "sci", extver=1, do_not_scale_image_data=True
)
assert ext == ("sci", 1)
hl.close()
pytest.raises(TypeError, _getext, filename, "readonly", "sci", ext=1)
pytest.raises(TypeError, _getext, filename, "readonly", "sci", 1, extver=2)
hl, ext = _getext(filename, "readonly", extname="sci")
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", extname="sci", extver=1)
assert ext == ("sci", 1)
hl.close()
pytest.raises(TypeError, _getext, filename, "readonly", extver=1)
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.conf.extension_name_case_sensitive at
runtime works.
"""
hdu = fits.ImageHDU()
hdu.name = "sCi"
assert hdu.name == "SCI"
assert hdu.header["EXTNAME"] == "SCI"
with fits.conf.set_temp("extension_name_case_sensitive", True):
hdu = fits.ImageHDU()
hdu.name = "sCi"
assert hdu.name == "sCi"
assert hdu.header["EXTNAME"] == "sCi"
hdu.name = "sCi"
assert hdu.name == "SCI"
assert hdu.header["EXTNAME"] == "SCI"
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
infile = self.data("test0.fits")
outfile = self.temp("test.fits")
with open(infile, "rb") as fin:
dat = fin.read()
offset = 0
with fits.open(infile) as hdul:
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header["TEST"] = "TEST"
hdu.writeto(outfile)
with fits.open(outfile) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header["TEST"] == "TEST"
assert hdu.data is None
with fits.open(infile) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset : offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header["SIMPLE"] = False
hdu.writeto(self.temp("test.fits"))
info = [(0, "", 1, "NonstandardHDU", 5, (), "", "")]
with fits.open(self.temp("test.fits")) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == "PRIMARY"
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert "EXTNAME" not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = "NOTREAL"
assert h1.name == "NOTREAL"
assert h1.header.get("EXTNAME") == "NOTREAL"
# Updating the EXTNAME in the header should update the .name
h1.header["EXTNAME"] = "TOOREAL"
assert h1.name == "TOOREAL"
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header["EXTNAME"]
assert h1.name == "PRIMARY"
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ""
assert "EXTNAME" not in h2.header
h2.name = "HELLO"
assert h2.name == "HELLO"
assert h2.header.get("EXTNAME") == "HELLO"
h2.header["EXTNAME"] = "GOODBYE"
assert h2.name == "GOODBYE"
def test_extver_extlevel(self):
"""Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
        # EXTVER and EXTLEVEL work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert "EXTVER" not in h1.header
assert "EXTLEVEL" not in h1.header
h1.ver = 2
assert h1.header.get("EXTVER") == 2
h1.header["EXTVER"] = 3
assert h1.ver == 3
del h1.header["EXTVER"]
        assert h1.ver == 1
h1.level = 2
assert h1.header.get("EXTLEVEL") == 2
h1.header["EXTLEVEL"] = 3
assert h1.level == 3
del h1.header["EXTLEVEL"]
assert h1.level == 1
pytest.raises(TypeError, setattr, h1, "ver", "FOO")
pytest.raises(TypeError, setattr, h1, "level", "BAR")
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
particular instance of this problem, though isn't unique to sys.stdout.
"""
with fits.open(self.data("test0.fits")) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header[f"TEST{idx}"] = "test"
hdul1.writeto(self.temp("test1.fits"))
hdul1.writeto(self.temp("test2.fits"))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data("test0.fits")) as hdul2:
with fits.open(self.temp("test2.fits")) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul2, hdul)):
hdu2, hdu = hdus
if idx != 1:
assert hdu.header == hdu2.header
else:
assert hdu2.header == hdu.header[: len(hdu2.header)]
assert np.all(hdu.data == hdu2.data)
class TestConvenienceFunctions(FitsTestCase):
def test_writeto(self, home_is_temp):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
header.set("CRPIX1", 1.0)
fits.writeto(
filename, data, header=header, overwrite=True, output_verify="silentfix"
)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert "CRPIX1" in hdul[0].header
assert hdul[0].header["CRPIX1"] == 1.0
def test_writeto_overwrite(self, home_is_temp):
"""
Ensure the `overwrite` keyword works as it should
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
fits.writeto(filename, data, header=header, overwrite=False)
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
class TestFileFunctions(FitsTestCase):
"""
Tests various basic I/O operations, specifically in the
astropy.io.fits.file._File class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
OSError (and not some other arbitrary exception).
"""
with pytest.raises(OSError, match=r"No such file or directory"):
fits.open(self.temp("foobar.fits"))
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ("ostream", "append"):
with fits.open(self.temp("foobar.fits"), mode=mode) as _:
pass
assert os.path.exists(self.temp("foobar.fits"))
os.remove(self.temp("foobar.fits"))
def test_open_file_handle(self):
# Make sure we can open a FITS file from an open file handle
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle) as _:
pass
with open(self.temp("temp.fits"), "wb") as handle:
with fits.open(handle, mode="ostream") as _:
pass
# Opening without explicitly specifying binary mode should fail
with pytest.raises(ValueError):
with open(self.data("test0.fits")) as handle:
with fits.open(handle) as _:
pass
# All of these read modes should fail
for mode in ["r", "rt"]:
with pytest.raises(ValueError):
with open(self.data("test0.fits"), mode=mode) as handle:
with fits.open(handle) as _:
pass
# These update or write modes should fail as well
for mode in ["w", "wt", "w+", "wt+", "r+", "rt+", "a", "at", "a+", "at+"]:
with pytest.raises(ValueError):
with open(self.temp("temp.fits"), mode=mode) as handle:
with fits.open(handle) as _:
pass
def test_fits_file_handle_mode_combo(self):
# This should work fine since no mode is given
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle) as _:
pass
# This should work fine since the modes are compatible
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle, mode="readonly") as _:
pass
# This should not work since the modes conflict
with pytest.raises(ValueError):
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle, mode="ostream") as _:
pass
def test_open_from_url(self):
file_url = "file:///" + self.data("test0.fits").lstrip("/")
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj) as _:
pass
# It will not be possible to write to a file that is from a URL object
for mode in ("ostream", "append", "update"):
with pytest.raises(ValueError):
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj, mode=mode) as _:
pass
@pytest.mark.remote_data(source="astropy")
def test_open_from_remote_url(self):
for dataurl in (conf.dataurl, conf.dataurl_mirror):
remote_url = f"{dataurl}/allsky/allsky_rosat.fits"
try:
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj) as fits_handle:
assert len(fits_handle) == 1
for mode in ("ostream", "append", "update"):
with pytest.raises(ValueError):
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
assert len(fits_handle) == 1
except (urllib.error.HTTPError, urllib.error.URLError):
continue
else:
break
else:
raise Exception("Could not download file")
def test_open_gzipped(self):
gzip_file = self._make_gzip_file()
with fits.open(gzip_file) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
with fits.open(gzip.GzipFile(gzip_file)) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
def test_open_gzipped_from_handle(self):
with open(self._make_gzip_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "gzip"
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with fits.open(self._make_gzip_file("test0.fz")) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp("test.fits.gz"), "ab+")
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp("test.fits.gz")) as hdul:
assert hdul[0].header == h.header
def test_fits_update_mode_gzip(self):
"""Test updating a GZipped FITS file"""
with fits.open(self._make_gzip_file("update.gz"), mode="update") as fits_handle:
hdu = fits.ImageHDU(data=[x for x in range(100)])
fits_handle.append(hdu)
with fits.open(self.temp("update.gz")) as new_handle:
assert len(new_handle) == 6
assert (new_handle[-1].data == [x for x in range(100)]).all()
def test_fits_append_mode_gzip(self):
"""Make sure that attempting to open an existing GZipped FITS file in
'append' mode raises an error"""
with pytest.raises(OSError):
with fits.open(self._make_gzip_file("append.gz"), mode="append") as _:
pass
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_open_bzipped(self):
bzip_file = self._make_bzip2_file()
with fits.open(bzip_file) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
with fits.open(bz2.BZ2File(bzip_file)) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_open_bzipped_from_handle(self):
with open(self._make_bzip2_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_detect_bzipped(self):
"""Test detection of a bzip2 file when the extension is not .bz2."""
with fits.open(self._make_bzip2_file("test0.xx")) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_writeto_bzip2_fileobj(self):
"""Test writing to a bz2.BZ2File file like object"""
fileobj = bz2.BZ2File(self.temp("test.fits.bz2"), "w")
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp("test.fits.bz2")) as hdul:
assert hdul[0].header == h.header
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_writeto_bzip2_filename(self):
"""Test writing to a bzip2 file by name"""
filename = self.temp("testname.fits.bz2")
h = fits.PrimaryHDU()
h.writeto(filename)
with fits.open(self.temp("testname.fits.bz2")) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zip_file = self._make_zip_file()
with fits.open(zip_file) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
with fits.open(zipfile.ZipFile(zip_file)) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
def test_open_zipped_from_handle(self):
with open(self._make_zip_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename="test0.fz")
with fits.open(zf) as fits_handle:
assert len(fits_handle) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
pytest.raises(OSError, fits.open, zf, "update")
pytest.raises(OSError, fits.open, zf, "append")
zf = zipfile.ZipFile(zf, "a")
pytest.raises(OSError, fits.open, zf, "update")
pytest.raises(OSError, fits.open, zf, "append")
def test_read_open_astropy_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2774
This tests reading from a ``GzipFile`` object from Astropy's
compatibility copy of the ``gzip`` module.
"""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_multiple_member_zipfile(self):
"""
Opening zip files containing more than one member files should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp("test0.zip"), "w")
zfile.write(self.data("test0.fits"))
zfile.writestr("foo", "bar")
zfile.close()
with pytest.raises(OSError):
fits.open(zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data("test0.fits"), "rb") as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data("test0.fits"), "rb")
f.close()
with fits.open(f) as f2:
assert len(f2) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode="update") as h:
h[0].header["EXPFLAG"] = "ABNORMAL"
h[1].data[0, 0] = 1
with fits.open(gf) as h:
# Just to make sure the update worked; if updates work
# normal writes should work too...
assert h[0].header["EXPFLAG"] == "ABNORMAL"
assert h[1].data[0, 0] == 1
def test_write_read_gzip_file(self, home_is_temp):
"""
Regression test for https://github.com/astropy/astropy/issues/2794
Ensure files written through gzip are readable.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
hdu.writeto(self.temp("test.fits.gz"))
with open(os.path.expanduser(self.temp("test.fits.gz")), "rb") as f:
assert f.read(3) == GZIP_MAGIC
with fits.open(self.temp("test.fits.gz")) as hdul:
assert np.all(hdul[0].data == data)
@pytest.mark.parametrize("ext", ["gz", "bz2", "zip"])
def test_compressed_ext_but_not_compressed(self, ext):
testfile = self.temp(f"test0.fits.{ext}")
shutil.copy(self.data("test0.fits"), testfile)
with fits.open(testfile) as hdul:
assert len(hdul) == 5
fits.append(testfile, np.arange(5))
with fits.open(testfile) as hdul:
assert len(hdul) == 6
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = io.BytesIO()
with open(self.data("test0.fits"), "rb") as f:
filelike.write(f.read())
filelike.seek(0)
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp("test.fits")
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode="update")
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified io.fits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file("test0.fits")
# Opening in text mode should outright fail
for mode in ("r", "w", "a"):
with open(self.temp("test0.fits"), mode) as f:
pytest.raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "rb") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "readonly"
for mode in ("wb", "ab"):
with open(self.temp("test0.fits"), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "wb+") as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "rb+") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "update"
with open(self.temp("test0.fits"), "ab+") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "append"
def test_mmap_unwriteable(self):
"""Regression test for https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise OSError("flush is broken on this platform")
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File.__dict__["_mmap_available"]._cache.clear()
try:
self.copy_file("test0.fits")
with pytest.warns(
AstropyUserWarning, match=r"mmap\.flush is unavailable"
) as w:
with fits.open(
self.temp("test0.fits"), mode="update", memmap=True
) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
# Double check that writing without mmap still worked
with fits.open(self.temp("test0.fits")) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File.__dict__["_mmap_available"]._cache.clear()
def test_mmap_allocate_error(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1380
Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY.
"""
mmap_original = mmap.mmap
# We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which
        # emulates an issue where an OSError is raised if the available address
# space is less than the size of the file even if memory mapping is used.
def mmap_patched(*args, **kwargs):
if kwargs.get("access") == mmap.ACCESS_COPY:
exc = OSError()
exc.errno = errno.ENOMEM
raise exc
else:
return mmap_original(*args, **kwargs)
with fits.open(self.data("test0.fits"), memmap=True) as hdulist:
with patch.object(mmap, "mmap", side_effect=mmap_patched) as p:
with pytest.warns(
AstropyUserWarning,
match=r"Could not memory map array with mode='readonly'",
):
data = hdulist[1].data
p.reset_mock()
assert not data.flags.writeable
def test_mmap_closing(self):
"""
Tests that the mmap reference is closed/removed when there aren't any
HDU data references left.
"""
if not _File._mmap_available:
pytest.xfail("not expected to work on platforms without mmap support")
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
assert hdul._file._mmap is None
hdul[1].data
assert hdul._file._mmap is not None
del hdul[1].data
# Should be no more references to data in the file so close the
# mmap
assert hdul._file._mmap is None
hdul[1].data
hdul[2].data
del hdul[1].data
            # hdul[2].data is still referenced, so keep the mmap open
assert hdul._file._mmap is not None
del hdul[2].data
assert hdul._file._mmap is None
assert hdul._file._mmap is None
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
hdul[1].data
# When the only reference to the data is on the hdu object, and the
# hdulist it belongs to has been closed, the mmap should be closed as
# well
assert hdul._file._mmap is None
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
data = hdul[1].data
# also make a copy
data_copy = data.copy()
# The HDUList is closed; in fact, get rid of it completely
del hdul
# The data array should still work though...
assert np.all(data == data_copy)
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from file-like objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike:
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data("test0.fits"), "rb") as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data("test0.fits")) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def test_write_bytesio_discontiguous(self):
"""
Regression test related to
https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
Demonstrates that writing an HDU containing a discontiguous Numpy array
should work properly.
"""
data = np.arange(100)[::3]
hdu = fits.PrimaryHDU(data=data)
fileobj = io.BytesIO()
hdu.writeto(fileobj)
fileobj.seek(0)
with fits.open(fileobj) as h:
assert np.all(h[0].data == data)
def test_write_bytesio(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2463
Test against `io.BytesIO`. `io.StringIO` is not supported.
"""
self._test_write_string_bytes_io(io.BytesIO())
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="Cannot test on Windows"
)
def test_filename_with_colon(self):
"""
Test reading and writing a file with a colon in the filename.
Regression test for https://github.com/astropy/astropy/issues/3122
"""
        # Skip on Windows since colons in filenames make NTFS sad.
filename = "APEXHET.2014-04-01T15:18:01.000.fits"
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert np.all(hdul[0].data == hdu.data)
def test_writeto_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to write an hdulist
to a full disk.
"""
def _writeto(self, array):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
msg = (
"Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file."
)
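        # The "requested 8000" figure follows from the data written below:
        # 1000 int64 values * 8 bytes each = 8000 bytes.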
with pytest.raises(OSError, match=msg) as exc:
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
n = np.arange(0, 1000, dtype="int64")
hdu = fits.PrimaryHDU(n)
hdulist = fits.HDUList(hdu)
filename = self.temp("test.fits")
with open(filename, mode="wb") as fileobj:
hdulist.writeto(fileobj)
def test_flush_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to update an hdulist
to a full disk.
"""
filename = self.temp("test.fits")
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul[0].data = np.arange(0, 1000, dtype="int64")
hdul.writeto(filename)
def _writedata(self, fileobj):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
msg = (
"Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file."
)
with pytest.raises(OSError, match=msg) as exc:
with fits.open(filename, mode="update") as hdul:
hdul[0].data = np.arange(0, 1000, dtype="int64")
hdul.insert(1, fits.ImageHDU())
hdul.flush()
def _test_write_string_bytes_io(self, fileobj):
"""
Implemented for both test_write_stringio and test_write_bytesio.
"""
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(fileobj)
hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
assert FITSDiff(hdul, hdul2).identical
def _make_gzip_file(self, filename="test0.fits.gz"):
gzfile = self.temp(filename)
with open(self.data("test0.fits"), "rb") as f:
gz = gzip.open(gzfile, "wb")
gz.write(f.read())
gz.close()
return gzfile
def test_write_overwrite(self, home_is_temp):
filename = self.temp("test_overwrite.fits")
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdu.writeto(filename)
hdu.writeto(filename, overwrite=True)
def _make_zip_file(self, mode="copyonwrite", filename="test0.fits.zip"):
zfile = zipfile.ZipFile(self.temp(filename), "w")
zfile.write(self.data("test0.fits"))
zfile.close()
return zfile.filename
def _make_bzip2_file(self, filename="test0.fits.bz2"):
bzfile = self.temp(filename)
with open(self.data("test0.fits"), "rb") as f:
bz = bz2.BZ2File(bzfile, "w")
bz.write(f.read())
bz.close()
return bzfile
def test_simulateonly(self):
"""Write to None simulates writing."""
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(None)
hdul[0].writeto(None)
hdul[0].header.tofile(None)
def test_bintablehdu_zero_bytes(self):
"""Make sure we don't have any zero-byte writes in BinTableHDU"""
bright = np.rec.array(
[
(1, "Sirius", -1.45, "A1V"),
(2, "Canopus", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu_non_zero = fits.BinTableHDU(bright)
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp("bright.fits"), mode="wb"))
hdu_non_zero.writeto(fh)
fh.close()
def test_primaryhdu_zero_bytes(self):
"""
Make sure we don't have any zero-byte writes from an ImageHDU
(or other) of `size % BLOCK_SIZE == 0`
"""
hdu_img_2880 = fits.PrimaryHDU(data=np.arange(720, dtype="i4"))
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp("image.fits"), mode="wb"))
hdu_img_2880.writeto(fh)
fh.close()
class TestStreamingFunctions(FitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self, home_is_temp):
shdu = self._make_streaming_hdu(self.temp("new.fits"))
assert isinstance(shdu.size, int)
assert shdu.size == 100
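        # The expected size follows from the header built by _make_streaming_hdu
        # below: NAXIS1 * NAXIS2 * |BITPIX| / 8 = 5 * 5 * 32 / 8 = 100 bytes.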
arr = np.arange(25, dtype=np.int32).reshape((5, 5))
shdu.write(arr)
assert shdu.writecomplete
shdu.close()
with fits.open(self.temp("new.fits")) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected.
"""
with pytest.raises(ValueError):
with open(self.temp("new.fits"), "wb") as f:
header = fits.Header()
fits.StreamingHDU(f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
with fits.open(self.temp("new.fits")) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = io.BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self, capsys):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header["EXTNAME"] = 12345678
hdul = fits.HDUList([phdu, ihdu])
filename = self.temp("temp.fits")
pytest.raises(
fits.VerifyError, hdul.writeto, filename, output_verify="exception"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
hdul.writeto(filename, output_verify="fix")
with fits.open(filename):
assert hdul[1].name == "12345678"
assert hdul[1].header["EXTNAME"] == "12345678"
hdul.close()
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd["SIMPLE"] = (True, "conforms to FITS standard")
hd["BITPIX"] = (32, "array data type")
hd["NAXIS"] = (2, "number of array dimensions")
hd["NAXIS1"] = 5
hd["NAXIS2"] = 5
hd["EXTEND"] = True
return fits.StreamingHDU(fileobj, hd)
def test_blank_ignore(self):
with fits.open(self.data("blank.fits"), ignore_blank=True) as f:
assert f[0].data.flat[0] == 2
def test_error_if_memmap_impossible(self):
pth = self.data("blank.fits")
with fits.open(pth, memmap=True) as hdul:
with pytest.raises(ValueError):
hdul[0].data
# However, it should not fail if do_not_scale_image_data was used:
# See https://github.com/astropy/astropy/issues/3766
with fits.open(pth, memmap=True, do_not_scale_image_data=True) as hdul:
hdul[0].data # Just make sure it doesn't crash
|
c51b52381453588ac1d14f311729b42a6cb133b1863a338dc4897c03f9dccadd | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
from io import BytesIO
from itertools import product
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.numpy import basic_indices
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import (
COMPRESSION_TYPES,
DITHER_SEED_CHECKSUM,
SUBTRACTIVE_DITHER_1,
)
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
from .test_table import comparerecords
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = fits.ImageHDU(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = fits.ImageHDU(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header["EXTVER"] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert "EXTVER" not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
        # Passing ver to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr["EXTVER"] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr["FILENAME"] = "labq01i3q_rawtag.fits"
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert phdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data("test0.fits")) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching the data, the data can no longer be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def test_open_2(self):
r = fits.open(self.data("test0.fits"))
info = [(0, "PRIMARY", 1, "PrimaryHDU", 138, (), "", "")] + [
(x, "SCI", x, "ImageHDU", 61, (40, 40), "int16", "") for x in range(1, 5)
]
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data("test0.fits"))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data("test0.fits"), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == "list index out of range"
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data("test0.fits"))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp("a_str.fits")
bfits = self.temp("b_str.fits")
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp("a_fileobj.fits")
bbfits = self.temp("b_fileobj.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp("a_str_slice.fits")
bfits = self.temp("b_str_slice.fits")
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp("a_fileobj_slice.fits")
bbfits = self.temp("b_fileobj_slice.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([("EXTNAME", "XPRIMARY"), ("EXTVER", 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert "EXTNAME" in hdul[0].header
assert hdul[0].name == "XPRIMARY"
assert hdul[0].name == hdul[0].header["EXTNAME"]
info = [(0, "XPRIMARY", 1, "PrimaryHDU", 5, (), "", "")]
assert hdul.info(output=False) == info
assert hdul["PRIMARY"] is hdul["XPRIMARY"]
assert hdul["PRIMARY"] is hdul[("XPRIMARY", 1)]
hdul[0].name = "XPRIMARY2"
assert hdul[0].header["EXTNAME"] == "XPRIMARY2"
hdul.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].name == "XPRIMARY2"
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data("test0.fits")) as r:
assert r["primary"].header["naxis"] == 0
assert r[0].header["naxis"] == 0
            # If there is more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r["sci", 1].header["detector"] == 1
# append (using "update()") a new card
r[0].header["xxx"] = 1.234e56
assert (
"\n".join(str(x) for x in r[0].header.cards[-3:])
== "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 "
)
# rename a keyword
r[0].header.rename_keyword("filename", "fname")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "history")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "simple")
r[0].header.rename_keyword("fname", "filename")
# get a subsection of data
assert np.array_equal(
r[2].data[:3, :3],
np.array(
[[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16
),
)
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp("test_new.fits"), mode="append") as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be modified
del n
            # If an existing file is opened with "append" mode then, as in
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp("test_new.fits"), self.temp("test_append.fits"))
with fits.open(self.temp("test_append.fits"), mode="append") as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp("test_append.fits"), self.temp("test_update.fits"))
with fits.open(self.temp("test_update.fits"), mode="update") as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header["rootname"] == "U2EQ0201T"
u[0].header["rootname"] = "abc"
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure, e.g. adding or
                # deleting HDU(s), expanding or reducing the header beyond the
                # existing number of blocks (2880 bytes per block), or changing
                # the data size, then the HDUList is written to a temporary
                # file, the original file is deleted, and the temporary file is
                # renamed to the original file name and reopened in update
                # mode. To the user these two kinds of update writeback look
                # the same, unless the optional argument in flush or close is
                # set to 1.
del u[2]
u.flush()
                # The writeto method of the HDUList class writes the current
                # HDUList, with all changes made up to now, to a new file. This
                # method works the same regardless of the mode the HDUList was
                # opened with.
u.append(r[3])
u.writeto(self.temp("test_new.fits"))
del u
        # Another useful HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data("test0.fits")) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name="SCI")
assert np.array_equal(
hdu.data,
np.array(
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
],
dtype=np.float32,
),
)
# create an HDU with header and data
        # notice that the header has the right NAXIS keywords since it is
        # constructed with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype="int32"))
assert (
"\n".join(str(x) for x in hdu2.header.cards[1:5])
== "BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters "
)
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data("test0.fits"), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
# make a defect HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with pytest.warns(
AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\."
) as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(
AstropyUserWarning,
match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.",
) as w:
hdu.writeto(self.temp("test_new2.fits"), "fix")
assert len(w) == 3
def test_section(self):
# section testing
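        # ImageHDU.section reads only the requested portion of the data from
        # the file rather than loading the whole array, so every indexing
        # result below is compared against the equivalent slice of .data.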
fs = fits.open(self.data("arange.fits"))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]),
)
assert np.array_equal(
fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])
)
assert np.array_equal(
fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array(
[
[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384],
]
),
)
assert np.array_equal(
fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]]),
)
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(
fs[0].section[3:6, :, :][:3, :3, :3],
np.array(
[
[[330, 331, 332], [341, 342, 343], [352, 353, 354]],
[[440, 441, 442], [451, 452, 453], [462, 463, 464]],
[[550, 551, 552], [561, 562, 563], [572, 573, 574]],
]
),
)
assert np.array_equal(
fs[0].section[:, :, :][:3, :2, :2],
np.array(
[[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]]
),
)
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3])
bool_index = np.array(
[True, False, True, True, False, False, True, True, False, True]
)
assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :])
assert np.array_equal(fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3])
# Can we use negative indices?
assert np.array_equal(fs[0].section[-1], dat[-1])
assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7])
assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
]:
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype(">i2")
with fits.open(self.data("scale.fits")) as hdul:
assert hdul[0].data.dtype == np.dtype("float32")
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp("test_new.fits"), data=np.array([], dtype="uint8"))
d = np.zeros([100, 100]).astype("uint16")
fits.append(self.temp("test_new.fits"), data=d)
with fits.open(self.temp("test_new.fits"), uint=True) as f:
assert f[1].data.dtype == "uint16"
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type="uint8", bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2**int_size) - 1
if int_size == 64:
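                # 2**64 - 1 does not fit in a signed 64-bit integer, so keep it
                # as a NumPy uint64 scalar for the array operations below.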
                max_uint = np.uint64(max_uint)
dtype = f"uint{int_size}"
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
filename = f"uint{int_size}.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in new_uint_hdu.header
assert new_uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
@pytest.mark.parametrize(("from_file"), (False, True))
@pytest.mark.parametrize(("do_not_scale"), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(
self, from_file, do_not_scale
):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype="uint16")
if from_file:
# To generate the proper input file we always want to scale the
            # data before writing it...otherwise when we open it, it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = "unsigned_int.fits"
tmp_uint.writeto(self.temp(filename))
with fits.open(
self.temp(filename), do_not_scale_image_data=do_not_scale
) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert "BSCALE" in uint_hdu.header
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BSCALE"] == 1
assert uint_hdu.header["BZERO"] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header["BITPIX"] < 0
# BSCALE and BZERO should NOT be in header any more.
assert "BSCALE" not in uint_hdu.header
assert "BZERO" not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = "test_uint_to_float.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header["BLANK"] = 999
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header["BLANK"] = 2
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
hdu.writeto(self.temp("test_new.fits"))
# Allow the HDU to be written, but there should be a warning
# when writing a header with BLANK when then data is not
# int
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
with fits.open(self.temp("test_new.fits")) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale("int16", bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp("test.fits")
hdu.data[0] = 9999
hdu.header["BLANK"] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(
fits.verify.VerifyWarning, match=r"Invalid 'BLANK' keyword in header"
):
hdul.writeto(self.temp("test2.fits"))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp("test2.fits")) as hdul2:
assert "BLANK" not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True, mode="update") as hdul3:
data = hdul3[0].data
# This emits warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename, do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header["BLANK"] == 9999
assert hdul4[0].header["BSCALE"] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header["BZERO"] = 1.0
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data("fixed-1890.fits"))
orig_data = hdul[0].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data("fixed-1890.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data("fixed-1890.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
        Replacing the original header of an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file("test0.fits")
with fits.open(self.temp("test0.fits"), mode="update") as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy["NAXIS*"]
hdul[1].header = hdr_copy
with fits.open(self.temp("test0.fits")) as hdul:
assert (orig_data == hdul[1].data).all()
# The test below raised a `ResourceWarning: unclosed transport` exception
# due to a bug in Python <=3.10 (cf. cpython#90476)
@pytest.mark.filterwarnings("ignore:unclosed transport <asyncio.sslproto")
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file("scale.fits")
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file("scale.fits")
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[0].header["BITPIX"]
orig_bzero = hdul[0].header["BZERO"]
orig_bscale = hdul[0].header["BSCALE"]
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].header["BITPIX"] == orig_bitpix
assert hdul[0].header["BZERO"] == orig_bzero
assert hdul[0].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("test0.fits")) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].data is None
assert h[1].header["NAXIS"] == 0
assert "NAXIS1" not in h[1].header
assert "NAXIS2" not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header["BLANK"] = "nan"
with pytest.warns(
fits.verify.VerifyWarning,
match=r"Invalid value for 'BLANK' keyword in header: 'nan'",
):
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as (
hdu,
):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r"data object array\(1\) should have at least one dimension"
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
# Regression test for https://github.com/astropy/astropy/issues/14527
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=np.array(1))
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=np.array(1))
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update") as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp("test.fits")) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
("data", "compression_type", "quantize_level"),
[
(np.zeros((2, 10, 10), dtype=np.float32), "RICE_1", 16),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_1", -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_2", -0.01),
(np.zeros((100, 100)) + 1, "HCOMPRESS_1", 16),
(np.zeros((10, 10)), "PLIO_1", 16),
],
)
@pytest.mark.parametrize("byte_order", ["<", ">"])
def test_comp_image(self, data, compression_type, quantize_level, byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(
data,
name="SCI",
compression_type=compression_type,
quantize_level=quantize_level,
)
ofd.append(chdu)
ofd.writeto(self.temp("test_new.fits"), overwrite=True)
ofd.close()
with fits.open(self.temp("test_new.fits")) as fd:
assert (fd[1].data == data).all()
assert fd[1].header["NAXIS"] == chdu.header["NAXIS"]
assert fd[1].header["NAXIS1"] == chdu.header["NAXIS1"]
assert fd[1].header["NAXIS2"] == chdu.header["NAXIS2"]
assert fd[1].header["BITPIX"] == chdu.header["BITPIX"]
@pytest.mark.remote_data
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import pickle
np.random.seed(42)
# Basically what scipy.datasets.ascent() does.
fname = download_file(
"https://github.com/scipy/dataset-ascent/blob/main/ascent.dat?raw=true"
)
with open(fname, "rb") as f:
scipy_data = np.array(pickle.load(f))
data = scipy_data + np.random.randn(512, 512) * 10
fits.ImageHDU(data).writeto(self.temp("im1.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-1,
dither_seed=5,
).writeto(self.temp("im2.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-100,
dither_seed=5,
).writeto(self.temp("im3.fits"))
im1 = fits.getdata(self.temp("im1.fits"))
im2 = fits.getdata(self.temp("im2.fits"))
im3 = fits.getdata(self.temp("im3.fits"))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(
ValueError,
fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32),
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_shape=(2, 10, 10),
)
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(
data=cube,
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_shape=(1, 5, 5),
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul["SCI"].data - cube).max() < 1.0 / 15.0
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
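        # With DITHER_SEED_CHECKSUM the ZDITHER0 seed is derived from the data:
        # the byte sum of the first tile (here the first row) modulo 10000, plus 1.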
csum = (array[0].view("uint8").sum() % 10000) + 1
hdu = fits.CompImageHDU(
data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM,
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert "ZQUANTIZ" in hdul[1]._header
assert hdul[1]._header["ZQUANTIZ"] == "SUBTRACTIVE_DITHER_1"
assert "ZDITHER0" in hdul[1]._header
assert hdul[1]._header["ZDITHER0"] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data("comp.fits"), disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data("comp.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file("comp.fits")
mtime = os.stat(self.temp("comp.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("comp.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("comp.fits")).st_mtime
@pytest.mark.slow
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(
hdul1[1].compressed_data, hdul2[1].compressed_data
)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(
self.data("fixed-1890.fits"), do_not_scale_image_data=True
) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("fixed-1890-z.fits"))
hdul = fits.open(self.temp("fixed-1890-z.fits"))
orig_data = hdul[1].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp("fixed-1890-z.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp("fixed-1890-z.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[1].header["BITPIX"]
orig_bzero = hdul[1].header["BZERO"]
orig_bscale = hdul[1].header["BSCALE"]
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[1].header["BITPIX"] == orig_bitpix
assert hdul[1].header["BZERO"] == orig_bzero
assert hdul[1].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data("scale.fits")) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type="GZIP_1")
# First make a test image with lossy compression and make sure it
# wasn't compressed perfectly. This shouldn't happen ever, but just to
        # make sure the test is non-trivial.
chdu1.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(
data=noise, compression_type="GZIP_1", quantize_level=0.0
) # No quantization
chdu2.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = (np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange(
1, 7
)
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[: data2.shape[0], : data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type="RICE_1", tile_shape=(6, 7))
chdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), disable_image_compression=True) as h:
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM1"])
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM2"])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file("comp.fits")
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header["test1"] = "test"
hdul[1]._header["test2"] = "test2"
with fits.open(self.temp("comp.fits")) as hdul:
assert "test1" in hdul[1].header
assert hdul[1].header["test1"] == "test"
assert "test2" in hdul[1].header
assert hdul[1].header["test2"] == "test2"
# Test update via index now:
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
hdr[hdr.index("TEST1")] = "foo"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["TEST1"] == "foo"
# Test slice updates
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header["TEST*"] = "qux"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["qux", "qux"]
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
idx = hdr.index("TEST1")
hdr[idx : idx + 2] = "bar"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["bar", "bar"]
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header[("COMMENT", 1)] = "I am fire. I am death!"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["COMMENT"][1] == "I am fire. I am death!"
assert hdul[1]._header["COMMENT"][1] == "I am fire. I am death!"
# Test deleting by keyword and by slice
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
del hdr["COMMENT"]
idx = hdr.index("TEST1")
del hdr[idx : idx + 2]
with fits.open(self.temp("comp.fits")) as hdul:
assert "COMMENT" not in hdul[1].header
assert "COMMENT" not in hdul[1]._header
assert "TEST1" not in hdul[1].header
assert "TEST1" not in hdul[1]._header
assert "TEST2" not in hdul[1].header
assert "TEST2" not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data("comp.fits")) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, "TFIELDS", 8)
test_set_keyword(hdr, "TTYPE1", "Foo")
test_set_keyword(hdr, "ZCMPTYPE", "ASDF")
test_set_keyword(hdr, "ZVAL1", "Foo")
def test_compression_header_append(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append("TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
imghdr.append(("FOO", "bar", "qux"), end=True)
assert "FOO" in imghdr
assert imghdr[-1] == "bar"
assert "FOO" in tblhdr
assert tblhdr[-1] == "bar"
imghdr.append(("CHECKSUM", "abcd1234"))
assert "CHECKSUM" in imghdr
assert imghdr["CHECKSUM"] == "abcd1234"
assert "CHECKSUM" not in tblhdr
assert "ZHECKSUM" in tblhdr
assert tblhdr["ZHECKSUM"] == "abcd1234"
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data("comp.fits")) as hdul:
header = hdul[1].header
while len(header) < 1000:
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, "TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
assert tblhdr.count("TFIELDS") == 1
# First try keyword-relative insert
imghdr.insert("TELESCOP", ("OBSERVER", "Phil Plait"))
assert "OBSERVER" in imghdr
assert imghdr.index("OBSERVER") == imghdr.index("TELESCOP") - 1
assert "OBSERVER" in tblhdr
assert tblhdr.index("OBSERVER") == tblhdr.index("TELESCOP") - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index("OBSERVER")
imghdr.insert("OBSERVER", ("FOO",))
assert "FOO" in imghdr
assert imghdr.index("FOO") == idx
assert "FOO" in tblhdr
assert tblhdr.index("FOO") == tblhdr.index("OBSERVER") - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set("ZBITPIX", 77, "asdf", after="XTENSION")
assert len(w) == 1
assert "ZBITPIX" not in imghdr
assert tblhdr.count("ZBITPIX") == 1
assert tblhdr["ZBITPIX"] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set("GCOUNT", 99, before="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") - 1
assert imghdr["GCOUNT"] == 99
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") - 1
assert tblhdr["ZGCOUNT"] == 99
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
imghdr.set("GCOUNT", 2, after="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") + 1
assert imghdr["GCOUNT"] == 2
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") + 1
assert tblhdr["ZGCOUNT"] == 2
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header["COMMENT"] = "hello world"
assert hdu.header["COMMENT"] == ["hello world"]
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["COMMENT"] == ["hello world"]
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype="float32")
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(("ZTENSION", "IMAGE"))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count("ZTENSION") == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZNAXIS")
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZBITPIX")
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data("double_ext.fits")) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices["EXTNAME"]
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == "ccd00"
assert "EXTNAME" in hdu.header
assert hdu.name == hdu.header["EXTNAME"]
            # There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices["EXTNAME"]
assert len(indices) == 1
# Test header sync from property set.
new_name = "NEW_NAME"
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header["EXTNAME"] == new_name
assert hdu._header["EXTNAME"] == new_name
assert hdu._image_header["EXTNAME"] == new_name
# Check that setting the header will change the name property.
hdu.header["EXTNAME"] = "NEW2"
assert hdu.name == "NEW2"
hdul.writeto(self.temp("tmp.fits"), overwrite=True)
with fits.open(self.temp("tmp.fits")) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices["EXTNAME"]) == 1
assert hdu1.name == "NEW2"
            # Check that deleting EXTNAME and then setting the name will
            # work properly.
del hdu.header["EXTNAME"]
hdu.name = "RE-ADDED"
assert hdu.name == "RE-ADDED"
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = "FOO"
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
name = "BAR"
hdu.name = name
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices["EXTNAME"]) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({"HELLO": "world"})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header["HELLO"] == "world"
@pytest.mark.parametrize(
("keyword", "dtype", "expected"),
[
("BSCALE", np.uint8, np.float32),
("BSCALE", np.int16, np.float32),
("BSCALE", np.int32, np.float64),
("BZERO", np.uint8, np.float32),
("BZERO", np.int16, np.float32),
("BZERO", np.int32, np.float64),
],
)
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE,BZERO is set to floating point values, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp("test.fits"))
del hdu
with fits.open(self.temp("test.fits")) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize(
"dtype", (np.uint8, np.int16, np.uint16, np.int32, np.uint32)
)
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid - 50, mid + 50, dtype=dtype)
testfile = self.temp("test.fits")
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
@pytest.mark.parametrize(
("dtype", "compression_type"), product(("f", "i4"), COMPRESSION_TYPES)
)
def test_write_non_contiguous_data(self, dtype, compression_type):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
This used to require changing the whole array to be C-contiguous before
passing to CFITSIO, but we no longer need this - our explicit conversion
to bytes in the compression codecs returns contiguous bytes for each
tile on-the-fly.
"""
orig = np.arange(400, dtype=dtype).reshape((20, 20), order="f")[::2, ::2]
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig, compression_type=compression_type)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp("test.fits"))
actual = fits.getdata(self.temp("test.fits"))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comp_image_deprecated_tile_size(self):
# Ensure that tile_size works but is deprecated. This test
# can be removed once support for tile_size is removed.
with pytest.warns(
AstropyDeprecationWarning,
match="The tile_size argument has been deprecated",
):
chdu = fits.CompImageHDU(np.zeros((3, 4, 5)), tile_size=(5, 2, 1))
assert chdu.tile_shape == (1, 2, 5)
def test_comp_image_deprecated_tile_size_and_tile_shape(self):
# Make sure that tile_size and tile_shape are not both specified
with pytest.warns(AstropyDeprecationWarning) as w:
with pytest.raises(
ValueError, match="Cannot specify both tile_size and tile_shape."
):
fits.CompImageHDU(
np.zeros((3, 4, 5)), tile_size=(5, 2, 1), tile_shape=(3, 2, 3)
)
def test_comp_image_properties_default(self):
chdu = fits.CompImageHDU(np.zeros((3, 4, 5)))
assert chdu.tile_shape == (1, 1, 5)
assert chdu.compression_type == "RICE_1"
def test_comp_image_properties_set(self):
chdu = fits.CompImageHDU(
np.zeros((3, 4, 5)), compression_type="PLIO_1", tile_shape=(2, 3, 4)
)
assert chdu.tile_shape == (2, 3, 4)
assert chdu.compression_type == "PLIO_1"
class TestCompHDUSections:
@pytest.fixture(autouse=True)
def setup_method(self, tmp_path):
shape = (13, 17, 25)
self.data = np.arange(np.prod(shape)).reshape(shape).astype(np.int32)
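        # The (5, 4, 5) tiles used below do not evenly divide the (13, 17, 25)
        # cube, so section access also exercises partial tiles at the edges.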
header1 = fits.Header()
hdu1 = fits.CompImageHDU(
self.data, header1, compression_type="RICE_1", tile_shape=(5, 4, 5)
)
header2 = fits.Header()
header2["BSCALE"] = 2
header2["BZERO"] = 100
hdu2 = fits.CompImageHDU(
self.data, header2, compression_type="RICE_1", tile_shape=(5, 4, 5)
)
hdulist = fits.HDUList([fits.PrimaryHDU(), hdu1, hdu2])
hdulist.writeto(tmp_path / "sections.fits")
self.hdul = fits.open(tmp_path / "sections.fits")
def teardown_method(self):
self.hdul.close()
self.hdul = None
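    # hypothesis's basic_indices strategy generates arbitrary valid combinations
    # of integers, slices, and ellipsis for an array of the given shape.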
@given(basic_indices((13, 17, 25)))
def test_section_slicing(self, index):
assert_equal(self.hdul[1].section[index], self.hdul[1].data[index])
assert_equal(self.hdul[1].section[index], self.data[index])
@given(basic_indices((13, 17, 25)))
def test_section_slicing_scaling(self, index):
assert_equal(self.hdul[2].section[index], self.hdul[2].data[index])
assert_equal(self.hdul[2].section[index], self.data[index] * 2 + 100)
def test_comphdu_fileobj():
# Regression test for a bug that caused an error to happen
# internally when reading the data if requested data shapes
# were not plain integers - this was triggered when accessing
# sections on data backed by certain kinds of objects such as
# BytesIO (but not regular file handles)
data = np.arange(6).reshape((2, 3)).astype(np.int32)
byte_buffer = BytesIO()
header = fits.Header()
hdu = fits.CompImageHDU(data, header, compression_type="RICE_1")
hdu.writeto(byte_buffer)
byte_buffer.seek(0)
hdu2 = fits.open(byte_buffer, mode="readonly")[1]
assert hdu2.section[1, 2] == 5
def test_comphdu_bscale(tmp_path):
"""
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS.
"""
filename1 = tmp_path / "3hdus.fits"
filename2 = tmp_path / "3hdus_comp.fits"
x = np.random.random((100, 100)) * 100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x - 50, dtype=int), uint=True)
x1.header["BZERO"] = 20331
x1.header["BSCALE"] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(
data=hdus[1].data.astype(np.uint32), header=hdus[1].header
)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify("exception")
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = get_pkg_data_filename("data/compressed_float_bzero.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmp_path):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmp_path / "floatimg_with_bzero.fits"
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header["BZERO"] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmp_path):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmp_path / "test.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmp_path / "test2.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
"""Test for int8 support, https://github.com/astropy/astropy/issues/11995"""
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header["BITPIX"] == 8
assert hdul[0].header["BZERO"] == -128
assert hdul[0].header["BSCALE"] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
|
cda523a23c9d7f13a5fe2782fc174691386f9e0b152c038c34069494f70f0352 | """
This test file uses the https://github.com/esheldon/fitsio package to verify
our compression and decompression routines against the implementation in
cfitsio.
*Note*: The fitsio library is GPL licensed, therefore it could be interpreted
that so is this test file. Given that this test file isn't imported anywhere
else in the code this shouldn't cause us any issues. Please bear this in mind
when editing this file.
"""
import os
import numpy as np
import pytest
from astropy.io import fits
from .conftest import _expand, fitsio_param_to_astropy_param
# This is so that tox can force this file to be run, and not be silently
# skipped on CI, but in all other test runs it's skipped if fitsio isn't present.
if "ASTROPY_ALWAYS_TEST_FITSIO" in os.environ:
import fitsio
else:
fitsio = pytest.importorskip("fitsio")
@pytest.fixture(
scope="module",
params=_expand(
[((10,),), ((5,), (1,), (3,))],
[((12, 12),), ((1, 12), (4, 5), (6, 6), None)],
[((15, 15),), ((1, 15), (5, 1), (5, 5))],
[
((15, 15, 15),),
((5, 5, 1), (5, 7, 1), (1, 5, 4), (1, 1, 15), (15, 1, 5)),
],
        # Test the situation where the tile shape passed in is larger than the
        # array shape
[
(
(4, 4, 5),
(5, 5, 5),
),
(
(5, 5, 1),
None,
),
],
# Test shapes which caused errors
# This one we can't test here as it causes segfaults in cfitsio
# It is tested in test_roundtrip_high_D though.
# [
# ((3, 4, 5),),
# ((1, 2, 3),),
# ],
# >3D Data are not currently supported by cfitsio
),
ids=lambda x: f"shape: {x[0]} tile_dims: {x[1]}",
)
def array_shapes_tile_dims(request, compression_type):
shape, tile_dims = request.param
# H_COMPRESS needs >=2D data and always 2D tiles
if compression_type == "HCOMPRESS_1":
if (
# We don't have at least a 2D image
len(shape) < 2
or
# We don't have 2D tiles
np.count_nonzero(np.array(tile_dims) != 1) != 2
or
# TODO: The following restrictions can be lifted with some extra work.
# The tile is not the first two dimensions of the data
tile_dims[0] == 1
or tile_dims[1] == 1
or
            # The tile dimensions are not an integer multiple of the array dims
np.count_nonzero(np.array(shape[:2]) % tile_dims[:2]) != 0
):
            pytest.xfail(
                "HCOMPRESS requires 2D tiles, from the first two "
                "dimensions, and an integer number of tiles along the first "
                "two axes."
            )
return shape, tile_dims
@pytest.fixture(scope="module")
def tile_dims(array_shapes_tile_dims):
return array_shapes_tile_dims[1]
@pytest.fixture(scope="module")
def data_shape(array_shapes_tile_dims):
return array_shapes_tile_dims[0]
@pytest.fixture(scope="module")
def base_original_data(data_shape, dtype, numpy_rng, compression_type):
random = numpy_rng.uniform(high=255, size=data_shape)
# Set first value to be exactly zero as zero values require special treatment
# for SUBTRACTIVE_DITHER_2
random.ravel()[0] = 0.0
# There seems to be a bug with the fitsio library where HCOMPRESS doesn't
    # work with int16 random data, so use more structured test data instead.
if compression_type.startswith("HCOMPRESS") and "i2" in dtype or "u1" in dtype:
random = np.arange(np.prod(data_shape)).reshape(data_shape)
return random.astype(dtype)
@pytest.fixture(scope="module")
def fitsio_compressed_file_path(
tmp_path_factory,
comp_param_dtype,
base_original_data,
data_shape, # For debugging
tile_dims,
):
compression_type, param, dtype = comp_param_dtype
if (
base_original_data.ndim > 2
and "u1" in dtype
and compression_type == "HCOMPRESS_1"
):
pytest.xfail("fitsio won't write these")
if compression_type == "PLIO_1" and "f" in dtype:
# fitsio fails with a compression error
pytest.xfail("fitsio fails to write these")
if compression_type == "NOCOMPRESS":
pytest.xfail("fitsio does not support NOCOMPRESS")
if (
compression_type == "HCOMPRESS_1"
and "f" in dtype
and param.get("qmethod", None) == 2
):
# fitsio writes these files with very large/incorrect zzero values, whereas
# qmethod == 1 works (and the two methods should be identical except for the
# treatment of zeros)
pytest.xfail("fitsio writes these files with very large/incorrect zzero values")
tmp_path = tmp_path_factory.mktemp("fitsio")
original_data = base_original_data.astype(dtype)
filename = tmp_path / f"{compression_type}_{dtype}.fits"
fits = fitsio.FITS(filename, "rw")
fits.write(original_data, compress=compression_type, tile_dims=tile_dims, **param)
return filename
@pytest.fixture(scope="module")
def astropy_compressed_file_path(
comp_param_dtype,
tmp_path_factory,
base_original_data,
data_shape, # For debugging
tile_dims,
):
compression_type, param, dtype = comp_param_dtype
original_data = base_original_data.astype(dtype)
tmp_path = tmp_path_factory.mktemp("astropy")
filename = tmp_path / f"{compression_type}_{dtype}.fits"
param = fitsio_param_to_astropy_param(param)
hdu = fits.CompImageHDU(
data=original_data,
compression_type=compression_type,
tile_shape=None if tile_dims is None else tile_dims[::-1],
**param,
)
hdu.writeto(filename)
return filename
def test_decompress(
fitsio_compressed_file_path,
comp_param_dtype,
):
compression_type, param, dtype = comp_param_dtype
with fits.open(fitsio_compressed_file_path) as hdul:
data = hdul[1].data
assert hdul[1]._header["ZCMPTYPE"].replace("ONE", "1") == compression_type
assert hdul[1].data.dtype.kind == np.dtype(dtype).kind
assert hdul[1].data.dtype.itemsize == np.dtype(dtype).itemsize
# The data might not always match the original data exactly in the case of
# lossy compression so instead of comparing the array read by astropy to the
# original data, we compare it to the data read in by fitsio (as those
# should match)
fts = fitsio.FITS(fitsio_compressed_file_path)
data2 = fts[1].read()
np.testing.assert_allclose(data, data2)
# The first value should be exactly equal to zero when using SUBTRACTIVE_DITHER_2
if param.get("qmethod", None) == 2:
assert data.ravel()[0] == 0.0
def test_compress(
astropy_compressed_file_path,
compression_type,
dtype,
):
if compression_type == "NOCOMPRESS":
pytest.xfail("fitsio does not support NOCOMPRESS")
fts = fitsio.FITS(astropy_compressed_file_path, "r")
header = fts[1].read_header()
data = fts[1].read()
assert header["ZCMPTYPE"] == compression_type
assert data.dtype.kind == np.dtype(dtype).kind
assert data.dtype.itemsize == np.dtype(dtype).itemsize
# The data might not always match the original data exactly in the case of
# lossy compression so instead of comparing the array read by fitsio to the
# original data, we compare it to the data read in by astropy (as those
# should match)
with fits.open(astropy_compressed_file_path) as hdul:
np.testing.assert_allclose(data, hdul[1].data)
|
5556e48619710d94583c10882ced2cefcd1fe7c603f24ed99d31337cc6cd996e | import itertools
import numpy as np
import pytest
COMPRESSION_TYPES = [
"GZIP_1",
"GZIP_2",
"RICE_1",
"HCOMPRESS_1",
"PLIO_1",
"NOCOMPRESS",
]
def fitsio_param_to_astropy_param(param):
# Convert fitsio kwargs to astropy kwargs
_map = {"qlevel": "quantize_level", "qmethod": "quantize_method"}
param = {_map[k]: v for k, v in param.items()}
# Map quantize_level
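    # fitsio uses qlevel=None to mean "no quantization"; astropy expresses the
    # same thing with quantize_level=0.0.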
if param.get("quantize_level", "missing") is None:
param["quantize_level"] = 0.0
return param
def _expand(*params):
"""
Expands a list of N iterables of parameters into a flat list with all
combinations of all parameters.
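    For example, _expand([(1, 2), ("a",)], [(3,), ("b", "c")]) returns
    [(1, "a"), (2, "a"), (3, "b"), (3, "c")].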
"""
expanded = []
for ele in params:
expanded += list(itertools.product(*ele))
return expanded
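# The lists below expand to byte-order-explicit dtype codes, e.g. "<i2", ">i4",
# "<u1" for the integers and "<f4", ">f8" for the floats.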
ALL_INTEGER_DTYPES = [
"".join(ele)
for ele in _expand([("<", ">"), ("i",), ("2", "4")], [("<", ">"), ("u",), ("1",)])
]
ALL_FLOAT_DTYPES = ["".join(ele) for ele in _expand([("<", ">"), ("f",), ("4", "8")])]
@pytest.fixture(
scope="session",
ids=lambda x: " ".join(map(str, x)),
# The params here are compression type, parameters for the compression /
# quantise and dtype
params=_expand(
# Test all compression types with default compression parameters for
# all integers
[
COMPRESSION_TYPES,
({},),
ALL_INTEGER_DTYPES,
],
# GZIP and NOCOMPRESS support lossless non-quantized floating point data
[
("GZIP_1", "GZIP_2", "NOCOMPRESS"),
({"qlevel": None},),
ALL_FLOAT_DTYPES,
],
# All compression types can also take quantized floating point input
# Rather than running all quantization parameters for all algorithms
# split up the algorithms to reduce the total number of tests.
[
["GZIP_1", "GZIP_2"],
({"qlevel": 5, "qmethod": -1},),
ALL_FLOAT_DTYPES,
],
[
["RICE_1"],
({"qlevel": 10, "qmethod": 1},),
ALL_FLOAT_DTYPES,
],
[
["HCOMPRESS_1"],
(
{"qlevel": 20, "qmethod": 2},
{"qlevel": 10, "qmethod": 1},
),
ALL_FLOAT_DTYPES,
],
# Note no PLIO here as that's intended for masks, i.e. data which can't
# be generated with quantization.
),
)
def comp_param_dtype(request):
return request.param
@pytest.fixture(scope="session")
def compression_type(comp_param_dtype):
return comp_param_dtype[0]
@pytest.fixture(scope="session")
def compression_param(comp_param_dtype):
return comp_param_dtype[1]
@pytest.fixture(scope="session")
def dtype(comp_param_dtype):
return comp_param_dtype[2]
@pytest.fixture(scope="session")
def numpy_rng():
return np.random.default_rng(0)
|
dafa0f079a615b7c1c718bc963e56d3a156cc3da2f4fda6a5dde96cd2117f67f | from pathlib import Path
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy.io import fits
from astropy.io.fits._tiled_compression.codecs import PLIO1
from .conftest import fitsio_param_to_astropy_param
@pytest.fixture
def canonical_data_base_path():
return Path(__file__).parent / "data"
@pytest.fixture(
params=(Path(__file__).parent / "data").glob("m13_*.fits"), ids=lambda x: x.name
)
def canonical_int_hdus(request):
"""
This fixture provides 4 files downloaded from https://fits.gsfc.nasa.gov/registry/tilecompression.html
    which are used as canonical tests of data not compressed by Astropy.
"""
with fits.open(request.param) as hdul:
yield hdul[1]
@pytest.fixture
def original_int_hdu(canonical_data_base_path):
with fits.open(canonical_data_base_path / "m13.fits") as hdul:
yield hdul[0]
def test_canonical_data(original_int_hdu, canonical_int_hdus):
assert_allclose(original_int_hdu.data, canonical_int_hdus.data)
def test_zblank_support(canonical_data_base_path, tmp_path):
# This uses a test 12x12 image which contains a NaN value in the [1, 1]
# pixel - it was compressed using fpack which automatically added a ZBLANK
# header keyword
reference = np.arange(144).reshape((12, 12)).astype(float)
reference[1, 1] = np.nan
with fits.open(canonical_data_base_path / "compressed_with_nan.fits") as hdul:
assert_equal(np.round(hdul[1].data), reference)
# Now generate a file ourselves and check that the output has the ZBLANK
# keyword set automatically
hdu = fits.CompImageHDU(
data=reference, compression_type="RICE_1", tile_shape=(6, 6)
)
hdu.writeto(tmp_path / "test_zblank.fits")
with fits.open(tmp_path / "test_zblank.fits") as hdul:
assert "ZBLANK" in hdul[1].header
assert_equal(np.round(hdul[1].data), reference)
@pytest.mark.parametrize(
("shape", "tile_shape"),
(
([10, 10], [5, 5]), # something for HCOMPRESS
([5, 5, 5], [5, 5, 5]),
# ([5, 5, 5], [5, 5, 1]), # something for HCOMPRESS
([10, 15, 20], [5, 5, 5]),
([10, 5, 12], [5, 5, 5]),
# TODO: There's a stupid bit of code in CompImageHDU which stops this working.
# ([2, 3, 4, 5], [1, 1, 2, 3]),
([2, 3, 4, 5], [5, 5, 1, 1]),
),
)
def test_roundtrip_high_D(
numpy_rng, compression_type, compression_param, tmp_path, dtype, shape, tile_shape
):
if compression_type == "HCOMPRESS_1" and (
# We don't have at least a 2D image
len(shape) < 2
or
# We don't have 2D tiles
np.count_nonzero(np.array(tile_shape) != 1) != 2
or
# TODO: The following restrictions can be lifted with some extra work.
# The tile is not the first two dimensions of the data
tile_shape[0] == 1
or tile_shape[1] == 1
or
# The array dimensions are not an integer multiple of the tile dimensions
np.count_nonzero(np.array(shape[:2]) % tile_shape[:2]) != 0
):
pytest.xfail("HCOMPRESS requires 2D tiles.")
random = numpy_rng.uniform(high=255, size=shape)
# Set first value to be exactly zero as zero values require special treatment
# for SUBTRACTIVE_DITHER_2
random.ravel()[0] = 0.0
original_data = random.astype(dtype)
dtype_sanitizer = {
">": "big",
"<": "little",
"=": "native",
}
filename = (
tmp_path / f"{compression_type}_{dtype[1:]}_{dtype_sanitizer[dtype[0]]}.fits"
)
param = fitsio_param_to_astropy_param(compression_param)
hdu = fits.CompImageHDU(
data=original_data,
compression_type=compression_type,
tile_shape=tile_shape,
**param,
)
hdu.writeto(filename)
atol = 0
if compression_param.get("qmethod", None) is not None:
# This is a horrific hack. We are comparing quantized data to unquantized
# data here, so there can be pretty large differences. What this test
# is really checking for is arrays which are *completely* different,
# which would indicate the compression has not worked.
atol = 17
with fits.open(filename) as hdul:
a = hdul[1].data
np.testing.assert_allclose(original_data, hdul[1].data, atol=atol)
def test_plio_1_out_of_range():
pc = PLIO1(tilesize=10)
data = np.arange(-10, 0).astype(np.int32)
with pytest.raises(ValueError):
pc.encode(data)
|
665de4bdf251ff15d3460e534e77cd978f4371237d81722ac4dfc842f0fc5786 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
import copy
import os
import sys
from contextlib import nullcontext
from io import StringIO
import numpy as np
import pytest
import yaml
from astropy import units as u
from astropy.io import ascii
from astropy.io.ascii.ecsv import DELIMITERS, InvalidEcsvDatatypeWarning
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
from astropy.table import Column, QTable, Table
from astropy.table.column import MaskedColumn
from astropy.table.table_helpers import simple_table
from astropy.units import QuantityInfo
from astropy.units import allclose as quantity_allclose
from .common import TEST_DIR
DTYPES = [
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"float128",
"str",
]
if not hasattr(np, "float128") or os.name == "nt" or sys.maxsize <= 2**32:
DTYPES.remove("float128")
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == "bool":
data = np.array([False, True, False])
elif dtype == "str":
data = np.array(["ab 0", "ab, 1", "ab2"])
else:
data = np.arange(3, dtype=dtype)
c = Column(
data, unit="m / s", description="descr_" + dtype, meta={"meta " + dtype: 1}
)
T_DTYPES[dtype] = c
T_DTYPES.meta["comments"] = ["comment1", "comment2"]
# Corresponds to simple_table()
SIMPLE_LINES = [
"# %ECSV 1.0",
"# ---",
"# datatype:",
"# - {name: a, datatype: int64}",
"# - {name: b, datatype: float64}",
"# - {name: c, datatype: string}",
"# schema: astropy-2.0",
"a b c",
"1 1.0 c",
"2 2.0 d",
"3 3.0 e",
]
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format="ascii.ecsv")
assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
"""
Write a full-featured table with common types and explicitly check the output
"""
t = T_DTYPES["bool", "int64", "float64", "str"]
lines = [
"# %ECSV 1.0",
"# ---",
"# datatype:",
"# - name: bool",
"# unit: m / s",
"# datatype: bool",
"# description: descr_bool",
"# meta: {meta bool: 1}",
"# - name: int64",
"# unit: m / s",
"# datatype: int64",
"# description: descr_int64",
"# meta: {meta int64: 1}",
"# - name: float64",
"# unit: m / s",
"# datatype: float64",
"# description: descr_float64",
"# meta: {meta float64: 1}",
"# - name: str",
"# unit: m / s",
"# datatype: string",
"# description: descr_str",
"# meta: {meta str: 1}",
"# meta: !!omap",
"# - comments: [comment1, comment2]",
"# schema: astropy-2.0",
"bool int64 float64 str",
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
"False 2 2.0 ab2",
]
out = StringIO()
t.write(out, format="ascii.ecsv")
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format="ascii.ecsv", delimiter=delimiter)
t2s = [
Table.read(out.getvalue(), format="ascii.ecsv"),
Table.read(out.getvalue(), format="ascii"),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format="ecsv", guess=False),
ascii.read(out.getvalue(), format="ecsv"),
]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format="ascii.ecsv", delimiter="|")
assert "only space and comma are allowed" in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = "# %ECV 0.9"
with pytest.raises(ascii.InconsistentTableError):
Table.read("\n".join(lines), format="ascii.ecsv", guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, "# delimiter: |")
with pytest.raises(ValueError) as err:
Table.read("\n".join(lines), format="ascii.ecsv", guess=False)
assert "only space and comma are allowed" in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t["a"] = np.arange(24).reshape(2, 3, 4)
t["a"].info.description = "description"
t["a"].info.meta = {1: 2}
t["b"] = [1, 2]
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert np.all(t2["a"] == t["a"])
assert t2["a"].shape == t["a"].shape
assert t2["a"].dtype == t["a"].dtype
assert t2["a"].info.description == t["a"].info.description
assert t2["a"].info.meta == t["a"].info.meta
assert np.all(t2["b"] == t["b"])
def test_structured_input():
"""
Structured column in input.
"""
t = Table()
# Add unit, description and meta to make sure that round-trips as well.
t["a"] = Column(
[("B", (1.0, [2.0, 3.0])), ("A", (9.0, [8.0, 7.0]))],
dtype=[("s", "U1"), ("v", [("p0", "f8"), ("p1", "2f8")])],
description="description",
format=">", # Most formats do not work with structured!
unit="m", # Overall unit should round-trip.
meta={1: 2},
)
t["b"] = Column(
[[(1.0, 2.0), (9.0, 8.0)], [(3.0, 4.0), (7.0, 6.0)]],
dtype="f8,f8",
unit=u.Unit("m,s"), # Per part unit should round-trip too.
)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
for col in t.colnames:
assert np.all(t2[col] == t[col])
assert t2[col].shape == t[col].shape
assert t2[col].dtype == t[col].dtype
assert t2[col].unit == t[col].unit
assert t2[col].format == t[col].format
assert t2[col].info.description == t[col].info.description
assert t2[col].info.meta == t[col].info.meta
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, "i", "f"], names=["a", "b", "c"])
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index("a b c")
lines[header_index] = "a b d"
with pytest.raises(ValueError) as err:
ascii.read(lines, format="ecsv")
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert "!astropy.units.Unit" in out.getvalue()
assert "!astropy.units.Quantity" in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
assert obj1.shape == obj2.shape
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
# If no attrs are given, just compare the objects directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == "f":
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
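# Note on assert_objects_equal above: a dotted attr such as "info.unit" is
# resolved by walking getattr() one component at a time, falling back to item
# access (obj[subattr]) when an attribute lookup fails, which covers dict-like
# entries.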
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable(
{
name: col
for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)
}
)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format="ecsv")
assert type(t2) is Table
# Add a single quantity column
t["lon"] = mixin_cols["lon"]
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format="ecsv")
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
all_serialized_names = []
# ECSV stores times as value by default, so we just get the column back.
# One exception is tm3, which is set to serialize via jd1 and jd2.
for name in names:
s_names = serialized_names[name]
if not name.startswith("tm3"):
s_names = [
s_name.replace(".jd1", "")
for s_name in s_names
if not s_name.endswith("jd2")
]
all_serialized_names.extend(s_names)
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
# Read as an ascii.basic table (skip all the ECSV header processing)
t3 = table_cls.read(out.getvalue(), format="ascii.basic")
assert t3.colnames == all_serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3**ndim))]
col = col[idxs].reshape([3] * ndim)
return col
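# A quick sketch of make_multidim for ndim=2 (illustrative only):
#   make_multidim(np.array([1.0, 2.0]), 2)
# cycles the two input values over 3**2 = 9 positions and reshapes to (3, 3):
#   [[1., 2., 1.],
#    [2., 1., 2.],
#    [1., 2., 1.]]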
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
@pytest.mark.parametrize("ndim", (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "description"
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format="ascii.ecsv")
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
if colname in ("c1", "c2"):
compare = ["data"]
else:
# Storing Longitude as Column loses wrap_angle.
compare = [
attr
for attr in compare_attrs[colname]
if not (attr == "wrap_angle" and table_cls is Table)
]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmp_path):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.ecsv"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
# From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmp_path):
"""
Same as the previous test but set serialize_method to 'data_mask' so the mask is written out
"""
filename = tmp_path / "test.ecsv"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"][0] = "" # This would come back as masked for default "" NULL marker
# MaskedColumn with no masked elements. See the astropy.table MaskedColumnInfo class
# _represent_as_dict() method for info about how we test a column with no masked elements.
t["d"] = [1, 2, 3]
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmp_path):
"""Ensure that we can read-back enabled user-defined units."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = tmp_path / "test.ecsv"
unit = u.def_unit("bandpass_sol_lum")
t = table_cls()
t["l"] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2["l"].unit, u.UnrecognizedUnit)
assert str(t2["l"].unit) == "bandpass_sol_lum"
if table_cls is QTable:
assert np.all(t2["l"].value == t["l"].value)
else:
assert np.all(t2["l"] == t["l"])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3["l"].unit is unit
assert np.all(t3["l"] == t["l"])
# Just to be sure, also try writing with unit enabled.
filename2 = tmp_path / "test2.ecsv"
t3.write(filename2)
t4 = table_cls.read(filename2)
assert t4["l"].unit is unit
assert np.all(t4["l"] == t["l"])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format="ecsv")
col = dat["col0"]
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize("serialize_method", ["null_value", "data_mask"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, bool, str])
@pytest.mark.parametrize("delimiter", [",", " "])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
# Converting the integer data to str, numpy infers a dtype of U21, so force U2.
col = col.astype("U2")
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t["a"] = col
t["b"] = ["x", "y"] # Add another column for kicks
out = StringIO()
t.write(out, format="ascii.ecsv", serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], "mask"):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize("subtype", ["some-user-type", "complex"])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'",
):
t = ascii.read(txt, format="ecsv")
assert t["a"].dtype.kind == "U"
assert t["a"][0] == "[1,2]"
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(
ValueError, match="column 'a' failed to convert: shape mismatch"
):
Table.read(txt, format="ascii.ecsv")
def test_write_not_json_serializable():
t = Table()
t["a"] = np.array([{1, 2}, 1], dtype=object)
match = (
"could not convert column 'a' to string: Object of type set is not JSON"
" serializable"
)
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format="ascii.ecsv")
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format="ascii.ecsv")
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'object' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert t["a"][0] == "fail"
assert type(t["a"][1]) is str
assert type(t["a"].dtype) == np.dtype("O")
def test_read_complex():
"""Test an ECSV v1.0 file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'complex' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert t["a"].dtype.type is np.complex128
def test_read_str():
"""Test an ECSV file with a 'str' instead of 'string' datatype"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: str}
# schema: astropy-2.0
a
sometext
S""" # also testing single character text
with pytest.warns(
InvalidEcsvDatatypeWarning,
match="unexpected datatype 'str' of column 'a' is not in allowed",
):
t = Table.read(txt, format="ascii.ecsv")
assert isinstance(t["a"][1], str)
assert isinstance(t["a"][0], np.str_)
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format="ascii.ecsv")
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t["a"] = np.array([np.pi, 1 / 7], dtype=np.float64)
t["a"].info.format = ".2f"
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert np.all(t["a"] == t2["a"])
assert t2["a"].info.format == ".2f"
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First here is some helper code used to make the expected outputs code.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith("#")]
lines = lines[2:] # Get rid of the header
out = yaml.safe_load("\n".join(lines))
return out
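# For example (a sketch using the SIMPLE_LINES defined above),
# _get_ecsv_header_dict("\n".join(SIMPLE_LINES)) returns roughly:
#   {"datatype": [{"name": "a", "datatype": "int64"},
#                 {"name": "b", "datatype": "float64"},
#                 {"name": "c", "datatype": "string"}],
#    "schema": "astropy-2.0"}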
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format="ascii.ecsv")
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr["datatype"])
print(f"exps[{name!r}] =", fmt_hdr[:1])
print(fmt_hdr[1:])
print()
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols["scalar"] = np.array([1, 2], dtype=np.int16)
exps["scalar"] = [{"datatype": "int16", "name": "scalar"}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols["2-d variable array lists"] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps["2-d variable array lists"] = [
{"datatype": "string", "name": "2-d variable array lists", "subtype": "json"}
]
# Array of numpy arrays that is a 2-d variable array
cols["2-d variable array numpy"] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps["2-d variable array numpy"] = [
{
"datatype": "string",
"name": "2-d variable array numpy",
"subtype": "float32[2,null]",
}
]
cols["1-d variable array lists"] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps["1-d variable array lists"] = [
{"datatype": "string", "name": "1-d variable array lists", "subtype": "json"}
]
# Variable-length array
cols["1-d variable array numpy"] = np.array(
[np.array([1, 2], dtype=np.uint8), np.array([3, 4, 5], dtype=np.uint8)],
dtype=object,
)
exps["1-d variable array numpy"] = [
{"datatype": "string", "name": "1-d variable array numpy", "subtype": "uint8[null]"}
]
cols["1-d variable array numpy str"] = np.array(
[np.array(["a", "b"]), np.array(["c", "d", "e"])], dtype=object
)
exps["1-d variable array numpy str"] = [
{
"datatype": "string",
"name": "1-d variable array numpy str",
"subtype": "string[null]",
}
]
cols["1-d variable array numpy bool"] = np.array(
[np.array([True, False]), np.array([True, False, True])], dtype=object
)
exps["1-d variable array numpy bool"] = [
{
"datatype": "string",
"name": "1-d variable array numpy bool",
"subtype": "bool[null]",
}
]
cols["1-d regular array"] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps["1-d regular array"] = [
{"datatype": "string", "name": "1-d regular array", "subtype": "int8[2]"}
]
cols["2-d regular array"] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps["2-d regular array"] = [
{"datatype": "string", "name": "2-d regular array", "subtype": "float16[2,2]"}
]
cols["scalar object"] = np.array([{"a": 1}, {"b": 2}], dtype=object)
exps["scalar object"] = [
{"datatype": "string", "name": "scalar object", "subtype": "json"}
]
cols["1-d object"] = np.array(
[[{"a": 1}, {"b": 2}], [{"a": 1}, {"b": 2}]], dtype=object
)
exps["1-d object"] = [
{"datatype": "string", "name": "1-d object", "subtype": "json[2]"}
]
@pytest.mark.parametrize("name,col,exp", list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns."""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format="ascii.ecsv")
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr["datatype"] == exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, "data", "subtypes.ecsv"))
colnames = (
"i_index,"
"s_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,"
"f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,"
"v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,"
"m_int,m_double"
).split(",")
assert t.colnames == colnames
type_map = {
"byte": "int8",
"short": "int16",
"int": "int32",
"long": "int64",
"float": "float32",
"double": "float64",
"string": "str",
"boolean": "bool",
}
for col in t.itercols():
info = col.info
if info.name == "i_index":
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == "s": # Scalar
assert col.shape == (16,)
if subtype == "f": # Fixed array
assert col.shape == (16, 3)
if subtype == "v": # Variable array
assert col.shape == (16,)
assert info.dtype.name == "object"
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format="ascii.ecsv")
assert np.all(t["o"] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t["o"].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t["f"] == exp)
assert np.all(t["f"].mask == exp.mask)
assert np.all(t["v"][0] == [1])
assert np.all(t["v"][2] == [2, 3])
assert np.all(t["v"].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t["f"] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t["v"] = np.empty(2, dtype=object)
t["v"][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t["v"][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format="ascii.ecsv")
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format="ascii.ecsv")
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name])
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t["col"].dtype.kind == "U" # would be int with basic format
assert t["col"].description == "hello"
|
e9a11cd23e97428d9a20f8f6d5199361367ccd3e9ed1fbfcee868463707e53ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.types is deprecated.*",
)
from asdf.types import CustomType, ExtensionTypeMeta
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
__all__ = ["AstropyType", "AstropyAsdfType"]
# Names of AstropyType or AstropyAsdfType subclasses that are base classes
# and aren't used directly for serialization.
_TYPE_BASE_CLASS_NAMES = {"PolynomialTypeBase"}
_astropy_types = set()
_astropy_asdf_types = set()
class AstropyTypeMeta(ExtensionTypeMeta):
"""
Keeps track of `AstropyType` subclasses that are created so that they can
be stored automatically by astropy extensions for ASDF.
"""
def __new__(mcls, name, bases, attrs):
cls = super().__new__(mcls, name, bases, attrs)
# Classes using this metaclass are automatically added to the list of
# astropy extensions
if cls.__name__ not in _TYPE_BASE_CLASS_NAMES:
if cls.organization == "astropy.org" and cls.standard == "astropy":
_astropy_types.add(cls)
elif cls.organization == "stsci.edu" and cls.standard == "asdf":
_astropy_asdf_types.add(cls)
return cls
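# With this metaclass, a subclass declaring organization = "astropy.org" and
# standard = "astropy" is collected into _astropy_types, while
# organization = "stsci.edu" with standard = "asdf" lands in
# _astropy_asdf_types; base classes named in _TYPE_BASE_CLASS_NAMES are skipped.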
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r".*from astropy.io.misc.asdf.* subclasses the deprecated CustomType .*",
)
class AstropyType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas and tags that are defined by
Astropy.
IMPORTANT: This parent class should **not** be used for types that have
schemas that are defined by the ASDF standard.
"""
organization = "astropy.org"
standard = "astropy"
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
class AstropyAsdfType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas that are defined in the ASDF
standard, but have tags that are implemented within astropy.
IMPORTANT: This parent class should **not** be used for types that also
have schemas that are defined by astropy.
"""
organization = "stsci.edu"
standard = "asdf"
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
|
07a03e35b1b9db5c731e6f3b797af9297cb21e0c51e0eccbda19a2fc375ca4d0 | from pathlib import Path
from astropy.utils.introspection import minversion
def get_asdf_tests():
# Return a list of filenames for all ".py" files in this directory and
# recursively in every subdirectory. These are the files that pytest will
# import while attempting to find tests. The list is used below to ignore
# all of these files if an incompatible version of ASDF is installed.
asdf_dir = Path(__file__).parent.resolve()
paths = Path(asdf_dir).rglob("*.py")
return [str(p.relative_to(asdf_dir)) for p in paths]
collect_ignore = get_asdf_tests()
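# The try/else block below clears collect_ignore only when asdf is installed
# and is older than 3.0.0.dev; otherwise (asdf missing, or asdf 3.x and newer)
# these legacy test modules stay ignored.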
try:
import asdf
except ImportError:
pass
else:
if not minversion(asdf, "3.0.0.dev"):
collect_ignore = []
|
cbc56443d2407d24b728d973ae375a3825d6f74ea73c20210b05826ec73063e8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"AsdfExtension is deprecated.*",
)
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"BuiltinExtension is deprecated.*",
)
from asdf.extension import AsdfExtension, BuiltinExtension
from asdf.util import filepath_to_url
# Make sure that all tag implementations are imported by the time we create
# the extension class so that _astropy_asdf_types is populated correctly. We
# could do this using __init__ files, except it causes pytest import errors in
# the case that asdf is not installed.
from .tags.coordinates.angle import *
from .tags.coordinates.earthlocation import *
from .tags.coordinates.frames import *
from .tags.coordinates.representation import *
from .tags.coordinates.skycoord import *
from .tags.coordinates.spectralcoord import *
from .tags.fits.fits import *
from .tags.table.table import *
from .tags.time.time import *
from .tags.time.timedelta import *
from .tags.transform.basic import *
from .tags.transform.compound import *
from .tags.transform.functional_models import *
from .tags.transform.math import *
from .tags.transform.physical_models import *
from .tags.transform.polynomial import *
from .tags.transform.powerlaws import *
from .tags.transform.projections import *
from .tags.transform.spline import *
from .tags.transform.tabular import *
from .tags.unit.equivalency import *
from .tags.unit.quantity import *
from .tags.unit.unit import *
from .types import _astropy_asdf_types, _astropy_types
__all__ = ["AstropyExtension", "AstropyAsdfExtension"]
ASTROPY_SCHEMA_URI_BASE = "http://astropy.org/schemas/"
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "data", "schemas")
)
ASTROPY_URL_MAPPING = [
(
ASTROPY_SCHEMA_URI_BASE,
filepath_to_url(os.path.join(SCHEMA_PATH, "astropy.org"))
+ "/{url_suffix}.yaml",
)
]
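# Illustrative example (the exact suffix is an assumption, not taken from this
# file): with the mapping above, a schema URI such as
# "http://astropy.org/schemas/astropy/fits/fits-1.0.0" would resolve to a local
# file URL ending in "data/schemas/astropy.org/astropy/fits/fits-1.0.0.yaml".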
# This extension is used to register custom types that have both tags and
# schemas defined by Astropy.
class AstropyExtension(AsdfExtension):
@property
def types(self):
return _astropy_types
@property
def tag_mapping(self):
return [
("tag:astropy.org:astropy", ASTROPY_SCHEMA_URI_BASE + "astropy{tag_suffix}")
]
@property
def url_mapping(self):
return ASTROPY_URL_MAPPING
# This extension is used to register custom tag types that have schemas defined
# by ASDF, but have tag implementations defined in astropy.
class AstropyAsdfExtension(BuiltinExtension):
@property
def types(self):
return _astropy_asdf_types
|
516108b34cf2224777c570b0307f08d64059158e82c37725ea692cd3b9fcc698 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
import pytest
from astropy.io.misc.hdf5 import meta_path
from astropy.table import Column, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.compat.optional_deps import HAS_H5PY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
if HAS_H5PY:
import h5py
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
# HDF5 does not support object dtype (since it stores binary representations).
unsupported_cols = {
name: col
for name, col in mixin_cols.items()
if (isinstance(col, np.ndarray) and col.dtype.kind == "O")
}
mixin_cols = {
name: col for name, col in mixin_cols.items() if name not in unsupported_cols
}
ALL_DTYPES = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
np.bool_,
"|S3",
]
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == "|S3":
return [b"abc", b"def", b"ghi"]
else:
return [1, 2, 3]
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_nopath(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.warns(
UserWarning, match="table path was not set via the path= argument"
):
t1.write(test_file)
t1 = Table.read(test_file, path="__astropy_table__")
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_nopath_nonempty(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="bubu")
with pytest.raises(ValueError) as exc:
t1.write(test_file, append=True)
assert "table path should always be set via the path=" in exc.value.args[0]
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_notable_nopath(tmp_path):
test_file = tmp_path / "test.hdf5"
h5py.File(test_file, "w").close() # create empty file
with pytest.raises(ValueError, match="no table found in HDF5 group /"):
Table.read(test_file, path="/", format="hdf5")
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_nopath(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file)
assert np.all(t1["a"] == t2["a"])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_nopath_multi_tables(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t1.write(test_file, path="the_table_but_different", append=True, overwrite=True)
with pytest.warns(
AstropyUserWarning, match=r"path= was not specified but multiple tables"
):
t2 = Table.read(test_file)
assert np.all(t1["a"] == t2["a"])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_invalid_path(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file, path="test/")
assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_invalid_path(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
with pytest.raises(OSError) as exc:
Table.read(test_file, path="test/")
assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_missing_group(tmp_path):
test_file = tmp_path / "test.hdf5"
h5py.File(test_file, "w").close() # create empty file
with pytest.raises(OSError) as exc:
Table.read(test_file, path="test/path/table")
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_missing_table(tmp_path):
test_file = tmp_path / "test.hdf5"
with h5py.File(test_file, "w") as f:
f.create_group("test").create_group("path")
with pytest.raises(OSError) as exc:
Table.read(test_file, path="test/path/table")
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_missing_group_fileobj(tmp_path):
test_file = tmp_path / "test.hdf5"
with h5py.File(test_file, "w") as f:
with pytest.raises(OSError) as exc:
Table.read(f, path="test/path/table")
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_simple(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing_table(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
with pytest.raises(OSError) as exc:
t1.write(test_file, path="the_table", append=True)
assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_memory():
with h5py.File("test", "w", driver="core", backing_store=False) as output_file:
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(output_file, path="the_table")
t2 = Table.read(output_file, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing(tmp_path):
test_file = tmp_path / "test.hdf5"
h5py.File(test_file, "w").close() # create empty file
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file, path="the_table")
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing_overwrite(tmp_path):
test_file = tmp_path / "test.hdf5"
h5py.File(test_file, "w").close() # create empty file
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table", overwrite=True)
t2 = Table.read(test_file, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing_append(tmp_path):
test_file = tmp_path / "test.hdf5"
h5py.File(test_file, "w").close() # create empty file
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table_1", append=True)
t1.write(test_file, path="the_table_2", append=True)
t2 = Table.read(test_file, path="the_table_1")
assert np.all(t2["a"] == [1, 2, 3])
t3 = Table.read(test_file, path="the_table_2")
assert np.all(t3["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing_append_groups(tmp_path):
test_file = tmp_path / "test.hdf5"
with h5py.File(test_file, "w") as f:
f.create_group("test_1")
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="test_1/the_table_1", append=True)
t1.write(test_file, path="test_2/the_table_2", append=True)
t2 = Table.read(test_file, path="test_1/the_table_1")
assert np.all(t2["a"] == [1, 2, 3])
t3 = Table.read(test_file, path="test_2/the_table_2")
assert np.all(t3["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_existing_append_overwrite(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="table1")
t1.write(test_file, path="table2", append=True)
t1v2 = Table()
t1v2.add_column(Column(name="a", data=[4, 5, 6]))
with pytest.raises(OSError) as exc:
t1v2.write(test_file, path="table1", append=True)
assert exc.value.args[0] == "Table table1 already exists"
t1v2.write(test_file, path="table1", append=True, overwrite=True)
t2 = Table.read(test_file, path="table1")
assert np.all(t2["a"] == [4, 5, 6])
t3 = Table.read(test_file, path="table2")
assert np.all(t3["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_fileobj(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="the_table")
import h5py
with h5py.File(test_file, "r") as input_file:
t2 = Table.read(input_file, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_filobj_path(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="path/to/data/the_table")
import h5py
with h5py.File(test_file, "r") as input_file:
t2 = Table.read(input_file, path="path/to/data/the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_filobj_group_path(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, path="path/to/data/the_table")
import h5py
with h5py.File(test_file, "r") as input_file:
t2 = Table.read(input_file["path/to"], path="data/the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_wrong_fileobj():
class FakeFile:
def read(self):
pass
f = FakeFile()
with pytest.raises(TypeError, match="h5py can only open regular files"):
Table.read(f, format="hdf5")
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_fileobj(tmp_path):
test_file = tmp_path / "test.hdf5"
import h5py
with h5py.File(test_file, "w") as output_file:
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(output_file, path="the_table")
t2 = Table.read(test_file, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_create_dataset_kwargs(tmp_path):
test_file = tmp_path / "test.hdf5"
the_path = "the_table"
import h5py
with h5py.File(test_file, "w") as output_file:
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(output_file, path=the_path, maxshape=(None,))
# A roundabout way of checking this, but the table created above should be
# resizable if the kwarg was passed through successfully
t2 = Table()
t2.add_column(Column(name="a", data=[4, 5]))
with h5py.File(test_file, "a") as output_file:
output_file[the_path].resize((len(t1) + len(t2),))
output_file[the_path][len(t1) :] = t2.as_array()
t3 = Table.read(test_file, path="the_table")
assert np.all(t3["a"] == [1, 2, 3, 4, 5])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_filobj_group(tmp_path):
test_file = tmp_path / "test.hdf5"
import h5py
with h5py.File(test_file, "w") as output_file:
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(output_file, path="path/to/data/the_table")
t2 = Table.read(test_file, path="path/to/data/the_table")
assert np.all(t2["a"] == [1, 2, 3])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_write_wrong_type():
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(TypeError) as exc:
t1.write(1212, path="path/to/data/the_table", format="hdf5")
assert (
exc.value.args[0] == "output should be a string or an h5py File or Group object"
)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_dtypes(tmp_path, dtype):
test_file = tmp_path / "test.hdf5"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file, path="the_table")
assert np.all(t2["a"] == values)
assert t2["a"].dtype == dtype
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_preserve_all_dtypes(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file, path="the_table")
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_preserve_meta(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["a"] = 1
t1.meta["b"] = "hello"
t1.meta["c"] = 3.14159
t1.meta["d"] = True
t1.meta["e"] = np.array([1, 2, 3])
t1.write(test_file, path="the_table")
t2 = Table.read(test_file, path="the_table")
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_preserve_serialized(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.write(test_file, path="the_table", serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path="the_table")
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
# Check that the meta table is fixed-width bytes (see #11299)
h5 = h5py.File(test_file, "r")
meta_lines = h5[meta_path("the_table")]
assert meta_lines.dtype.kind == "S"
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_preserve_serialized_old_meta_format():
"""Test the old meta format
Only for some files created prior to v4.0, in compatibility mode.
"""
test_file = get_pkg_data_filename("data/old_meta_example.hdf5")
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t2 = Table.read(test_file, path="the_table")
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_preserve_serialized_in_complicated_path(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.write(
test_file,
path="the_table/complicated/path",
serialize_meta=True,
overwrite=True,
)
t2 = Table.read(test_file, path="the_table/complicated/path")
assert t1["a"].format == t2["a"].format
assert t1["a"].unit == t2["a"].unit
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_metadata_very_large(tmp_path):
"""Test that very large datasets work, now!"""
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2**16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2**18)
t1.write(test_file, path="the_table", serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path="the_table")
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_skip_meta(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["a"] = 1
t1.meta["b"] = "hello"
t1.meta["c"] = 3.14159
t1.meta["d"] = True
t1.meta["e"] = np.array([1, 2, 3])
t1.meta["f"] = str
wtext = (
f"Attribute `f` of type {type(t1.meta['f'])} cannot be written to HDF5 files -"
" skipping"
)
with pytest.warns(AstropyUserWarning, match=wtext) as w:
t1.write(test_file, path="the_table")
assert len(w) == 1
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_fail_meta_serialize(tmp_path):
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["f"] = str
with pytest.raises(Exception) as err:
t1.write(test_file, path="the_table", serialize_meta=True)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_h5py_objects(tmp_path):
# Regression test - ensure that Datasets are recognized automatically
test_file = tmp_path / "test.hdf5"
import h5py
with h5py.File(test_file, "w") as output_file:
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(output_file, path="the_table")
f = h5py.File(test_file, mode="r")
t2 = Table.read(f, path="the_table")
assert np.all(t2["a"] == [1, 2, 3])
t3 = Table.read(f["/"], path="the_table")
assert np.all(t3["a"] == [1, 2, 3])
t4 = Table.read(f["the_table"])
assert np.all(t4["a"] == [1, 2, 3])
f.close() # don't leave the file open
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_unicode_to_hdf5(tmp_path):
test_file = tmp_path / "test.hdf5"
t = Table()
t["p"] = ["a", "b", "c"]
t["q"] = [1, 2, 3]
t["r"] = [b"a", b"b", b"c"]
t["s"] = ["\u2119", "\u01b4", "\u2602"]
t.write(test_file, path="the_table", overwrite=True)
t1 = Table.read(test_file, path="the_table", character_as_bytes=False)
for col, col1 in zip(t.itercols(), t1.itercols()):
assert np.all(col == col1)
assert np.all(t1["p"].info.dtype.kind == "U")
assert np.all(t1["q"].info.dtype.kind == "i")
assert np.all(t1["r"].info.dtype.kind == "U")
assert np.all(t1["s"].info.dtype.kind == "U")
# Test default (character_as_bytes=True)
t2 = Table.read(test_file, path="the_table")
for col, col1 in zip(t.itercols(), t2.itercols()):
assert np.all(col == col1)
assert np.all(t2["p"].info.dtype.kind == "S")
assert np.all(t2["q"].info.dtype.kind == "i")
assert np.all(t2["r"].info.dtype.kind == "S")
assert np.all(t2["s"].info.dtype.kind == "S")
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would
# fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# HDF5 does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting="safe")
else:
assert np.all(a1 == a2)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_hdf5_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.hdf5"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(filename, format="hdf5", path="root")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.hdf5"
names = sorted(mixin_cols)
all_serialized_names = []
for name in names:
all_serialized_names.extend(serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = table_cls.read(filename, format="hdf5", path="root")
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, "r")
h5_names = list(h5["root"].dtype.names)
assert h5_names == all_serialized_names
h5.close()
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.hdf5"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = table_cls.read(filename, format="hdf5", path="root")
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize("name_col", unsupported_cols.items())
@pytest.mark.xfail(reason="column type unsupported")
def test_fits_unsupported_mixin(name_col, tmp_path):
    # Check that writing unsupported column types (the ones collected in
    # ``unsupported_cols`` above) fails as expected.
filename = tmp_path / "test_simple.fits"
name, col = name_col
Table([col], names=[name]).write(
filename, format="hdf5", path="root", serialize_meta=True
)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_round_trip_masked_table_default(tmp_path):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.h5"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"] = [b"c", b"d", b"e"]
t["c"].mask[1] = True
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_overwrite_serialized_meta():
# This used to cause an error because the meta data table
# was not removed from the existing file.
with h5py.File("test_data.h5", "w", driver="core", backing_store=False) as out:
t1 = Table()
t1.add_column(Column(data=[4, 8, 15], unit="cm"))
t1.write(out, path="data", serialize_meta=True)
t2 = Table.read(out, path="data")
assert all(t1 == t2)
assert t1.info(out=None) == t2.info(out=None)
t3 = Table()
t3.add_column(Column(data=[16, 23, 42], unit="g"))
t3.write(out, path="data", serialize_meta=True, append=True, overwrite=True)
t2 = Table.read(out, path="data")
assert all(t3 == t2)
assert t3.info(out=None) == t2.info(out=None)
@pytest.mark.skipif(not HAS_H5PY, reason="requires h5py")
def test_read_write_tilde_path(home_is_tmpdir):
test_file = os.path.join("~", "test.hdf5")
t1 = Table()
t1["a"] = [1, 2, 3]
t1.write(test_file, path="the_table")
t1 = Table.read(test_file, path="the_table")
t1 = Table.read(test_file, path="the_table", format="hdf5")
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
|
c80568803e60d02bbd19a8c04c0039c3fd2ae1b6aaf83d2bf5ba7d21cf6b4546 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from asdf import tagged
from asdf.exceptions import AsdfDeprecationWarning
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from astropy.modeling.core import CompoundModel, Model
from astropy.modeling.models import Const1D, Identity, Mapping
__all__ = ["CompoundType", "RemapAxesType"]
_operator_to_tag_mapping = {
"+": "add",
"-": "subtract",
"*": "multiply",
"/": "divide",
"**": "power",
"|": "compose",
"&": "concatenate",
"fix_inputs": "fix_inputs",
}
_tag_to_method_mapping = {
"add": "__add__",
"subtract": "__sub__",
"multiply": "__mul__",
"divide": "__truediv__",
"power": "__pow__",
"compose": "__or__",
"concatenate": "__and__",
"fix_inputs": "fix_inputs",
}
class CompoundType(TransformType):
name = ["transform/" + x for x in _tag_to_method_mapping.keys()]
types = [CompoundModel]
version = "1.2.0"
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
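        # Extract the bare operation name from the full tag URI: keep the text
        # after the last "/" and drop the trailing "-<version>" suffix.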
tag = node._tag[node._tag.rfind("/") + 1 :]
tag = tag[: tag.rfind("-")]
oper = _tag_to_method_mapping[tag]
left = node["forward"][0]
if not isinstance(left, Model):
raise TypeError(f"Unknown model type '{node['forward'][0]._tag}'")
right = node["forward"][1]
if not isinstance(right, Model) and not (
oper == "fix_inputs" and isinstance(right, dict)
):
raise TypeError(f"Unknown model type '{node['forward'][1]._tag}'")
if oper == "fix_inputs":
right = dict(zip(right["keys"], right["values"]))
model = CompoundModel("fix_inputs", left, right)
else:
model = getattr(left, oper)(right)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def to_tree_tagged(cls, model, ctx):
warnings.warn(create_asdf_deprecation_warning())
left = model.left
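        # ``fix_inputs`` stores its right-hand side as a dict; serialize it as
        # parallel ``keys``/``values`` lists, mirroring what from_tree_tagged
        # expects when reconstructing the model.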
if isinstance(model.right, dict):
right = {
"keys": list(model.right.keys()),
"values": list(model.right.values()),
}
else:
right = model.right
node = {"forward": [left, right]}
try:
tag_name = "transform/" + _operator_to_tag_mapping[model.op]
except KeyError:
raise ValueError(f"Unknown operator '{model.op}'")
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_tree_match
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a.left, b.left)
assert_tree_match(a.right, b.right)
class RemapAxesType(TransformType):
name = "transform/remap_axes"
types = [Mapping]
version = "1.3.0"
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node["mapping"]
n_inputs = node.get("n_inputs")
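        # A mapping made up entirely of integers is a plain axis selection /
        # reordering and can be restored directly as a Mapping model.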
if all(isinstance(x, int) for x in mapping):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max(x for x in mapping if isinstance(x, int)) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
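        # Non-integer entries are constant values: each one becomes a Const1D
        # concatenated after the Identity, and the final Mapping routes the
        # original inputs and the constants to their requested positions.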
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & Const1D(entry.value)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {"mapping": list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node["n_inputs"] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
assert a.n_inputs == b.n_inputs
|
dbffd00daf63a5e58e8118dbd21c98dda076c31f327ff76be81f0ece2455a374 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
def run_schema_example_test(organization, standard, name, version, check_func=None):
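    """
    Resolve the schema identified by (organization, standard, name, version),
    collect the ``examples`` entries embedded in it, and check that each one
    parses as an ASDF tree, optionally running ``check_func`` on the opened
    file.
    """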
import asdf
from asdf.exceptions import AsdfDeprecationWarning
from asdf.schema import load_schema
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import yaml_to_asdf
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.types.*is deprecated.*",
)
from asdf.types import format_tag
tag = format_tag(organization, standard, version, name)
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"default_extensions is deprecated.*",
)
uri = asdf.extension.default_extensions.extension_list.tag_mapping(tag)
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"get_default_resolver is deprecated.*",
)
r = asdf.extension.get_default_resolver()
examples = []
schema = load_schema(uri, resolver=r)
for node in asdf.treeutil.iter_tree(schema):
if (
isinstance(node, dict)
and "examples" in node
and isinstance(node["examples"], list)
):
for _, example in node["examples"]:
examples.append(example)
for example in examples:
buff = yaml_to_asdf("example: " + example.strip())
ff = asdf.AsdfFile(uri=uri)
# Add some dummy blocks so that the ndarray examples work
for i in range(3):
b = asdf.block.Block(np.zeros((1024 * 1024 * 8), dtype=np.uint8))
b._used = True
ff.blocks.add(b)
ff._open_impl(ff, buff, mode="r")
if check_func:
check_func(ff)
|
299e1b6ba0f7d0ec6248049f266b7ef388185055a6c40e3098cae9900c57b50c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
import numpy as np
from asdf.exceptions import AsdfDeprecationWarning
from asdf.tags.core.ndarray import NDArrayType
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree, yaml_to_asdf
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
from astropy.time import Time, TimeDelta
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 3
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_array_columns(tmpdir):
a = np.array(
[
([[1, 2], [3, 4]], 2.0, "x"),
([[5, 6], [7, 8]], 5.0, "y"),
([[9, 10], [11, 12]], 8.2, "z"),
],
dtype=[("a", "<i4", (2, 2)), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
assert t.columns["a"].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_structured_array_columns(tmpdir):
a = np.array(
[((1, "a"), 2.0, "x"), ((4, "b"), 5.0, "y"), ((5, "c"), 8.2, "z")],
dtype=[("a", [("a0", "<i4"), ("a1", "|S1")]), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table_row_order(tmpdir):
a = np.array(
[(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")],
dtype=[("a", "<i4"), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 1
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version("2.8.0"):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
else:
assert_roundtrip_tree(
{"table": t},
tmpdir,
asdf_check_func=check,
write_options={"auto_inline": 64},
)
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff):
pass
assert "Inconsistent data column lengths" in str(err.value)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(
rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"), masked=True
)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["a"].mask = [True, False, True]
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 4
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t["a"] = [1, 2, 3]
t["b"] = ["x", "y", "z"]
t["c"] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff["table"]["c"], u.Quantity)
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
def check(ff):
assert isinstance(ff["table"]["c"], Time)
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff["table"]["c"], TimeDelta)
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
def check(ff):
assert isinstance(ff["table"]["c"], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new["a"], old["a"])
NDArrayType.assert_equal(new["b"], old["b"])
assert skycoord_equal(new["c"], old["c"])
assert_roundtrip_tree(
{"table": t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match
)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff["table"]["c"], EarthLocation)
assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = table.NdarrayMixin([5, 6])
assert_roundtrip_tree({"table": t}, tmpdir)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], table.Table)
run_schema_example_test("stsci.edu", "asdf", "core/table", "1.0.0", check)
|
a56183dc5ab36c6e44a0b09b8f6507cfe7050ae947c1dce8e2944dba1ffa9643 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
import os
import numpy as np
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.io import fits
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_complex_structure(tmpdir):
with fits.open(
os.path.join(os.path.dirname(__file__), "data", "complex.fits"), memmap=False
) as hdulist:
tree = {"fits": hdulist}
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_fits_table(tmpdir):
a = np.array([(0, 1), (2, 3)], dtype=[("A", int), ("B", int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {"fits": h}
def check_yaml(content):
assert b"!<tag:astropy.org:astropy/table/table-1.0.0>" in content
assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], fits.HDUList)
run_schema_example_test("stsci.edu", "asdf", "fits/fits", "1.0.0", check)
|
496b13996cbd98b2e2c935857f6f8a56c74c938ff43df27a39213aef6ae107d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.modeling.models import UnitsMapping
def assert_model_roundtrip(model, tmpdir):
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir, tree_match_func=assert_models_equal)
def assert_models_equal(a, b):
assert a.name == b.name
assert a.inputs == b.inputs
assert a.input_units == b.input_units
assert a.outputs == b.outputs
assert a.mapping == b.mapping
assert a.input_units_allow_dimensionless == b.input_units_allow_dimensionless
for i in a.inputs:
if a.input_units_equivalencies is None:
a_equiv = None
else:
a_equiv = a.input_units_equivalencies.get(i)
if b.input_units_equivalencies is None:
b_equiv = None
else:
b_equiv = b.input_units_equivalencies.get(i, None)
assert a_equiv == b_equiv
def test_basic(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled),))
assert_model_roundtrip(m, tmpdir)
def test_remove_units(tmpdir):
m = UnitsMapping(((u.m, None),))
assert_model_roundtrip(m, tmpdir)
def test_accept_any_units(tmpdir):
m = UnitsMapping(((None, u.m),))
assert_model_roundtrip(m, tmpdir)
def test_with_equivalencies(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled),),
input_units_equivalencies={"x": u.equivalencies.spectral()},
)
assert_model_roundtrip(m, tmpdir)
def test_with_allow_dimensionless(tmpdir):
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless=True,
)
assert_model_roundtrip(m, tmpdir)
m = UnitsMapping(
((u.m, u.dimensionless_unscaled), (u.s, u.Hz)),
input_units_allow_dimensionless={"x0": True, "x1": False},
)
assert_model_roundtrip(m, tmpdir)
|
31a8302f5ac924e08785866352b11b7c80879633f453016573ecd8696bc53bb4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import warnings
import asdf
import numpy as np
from asdf import AsdfFile, util
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree, yaml_to_asdf
from packaging.version import Version
import astropy.units as u
from astropy.modeling import models as astmodels
from astropy.modeling.core import fix_inputs
from astropy.utils.compat.optional_deps import HAS_SCIPY
def custom_and_analytical_inverse():
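    # Compound model (Polynomial1D & Polynomial1D) with an explicitly assigned
    # compound inverse; used below to check that a user-assigned inverse
    # survives serialization.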
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ("a", "b")
m.outputs = ("c",)
return m
test_models = [
astmodels.Identity(2),
astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.0),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4),
astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3),
astmodels.Multiply(10 * u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order="xzx"),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.0 * u.deg),
astmodels.Scale(3.4 * u.deg),
astmodels.RotateNative2Celestial(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotateCelestial2Native(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, 0.3], "xyzx"),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, 0.3], "xyzy"),
astmodels.AiryDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10.0, x_0=0.5, width=5.0),
astmodels.Box2D(amplitude=10.0, x_0=0.5, x_width=5.0, y_0=1.5, y_width=7.0),
astmodels.Const1D(amplitude=5.0),
astmodels.Const2D(amplitude=5.0),
astmodels.Disk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0),
astmodels.Ellipse2D(amplitude=10.0, x_0=0.5, y_0=1.5, a=2.0, b=4.0, theta=0.1),
astmodels.Exponential1D(amplitude=10.0, tau=3.5),
astmodels.Gaussian1D(amplitude=10.0, mean=5.0, stddev=3.0),
astmodels.Gaussian2D(
amplitude=10.0, x_mean=5.0, y_mean=5.0, x_stddev=3.0, y_stddev=3.0
),
astmodels.KingProjectedAnalytic1D(amplitude=10.0, r_core=5.0, r_tide=2.0),
astmodels.Logarithmic1D(amplitude=10.0, tau=3.5),
astmodels.Lorentz1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10.0, x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10.0, x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10.0, x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10.0, x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10.0, x_0=0.5, y_0=1.5, r_in=5.0, width=10.0),
astmodels.Sersic1D(amplitude=10.0, r_eff=1.0, n=4.0),
astmodels.Sersic2D(
amplitude=10.0, r_eff=1.0, n=4.0, x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0
),
astmodels.Sine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Cosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Tangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcSine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcCosine1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.ArcTangent1D(amplitude=10.0, frequency=0.5, phase=1.0),
astmodels.Trapezoid1D(amplitude=10.0, x_0=0.5, width=5.0, slope=1.0),
astmodels.TrapezoidDisk2D(amplitude=10.0, x_0=0.5, y_0=1.5, R_0=5.0, slope=1.0),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10.0, fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.0 * u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.0),
astmodels.LogParabola1D(
amplitude=10,
x_0=0.5,
alpha=2.0,
beta=3.0,
),
astmodels.PowerLaw1D(amplitude=10.0, x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(
amplitude=10.0, x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5
),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
if HAS_SCIPY:
test_models.append(
astmodels.Spline1D(
np.array([-3.0, -3.0, -3.0, -3.0, -1.0, 0.0, 1.0, 3.0, 3.0, 3.0, 3.0]),
np.array(
[
0.10412331,
0.07013616,
-0.18799552,
1.35953147,
-0.15282581,
0.03923,
-0.04297299,
0.0,
0.0,
0.0,
0.0,
]
),
3,
)
)
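# Add every model exposed in ``astmodels.math`` (instantiated with default
# parameters) to the set of models exercised by the round-trip tests below.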
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [
astmodels.Legendre2D(
x_degree=1,
y_degree=1,
c0_0=1,
c0_1=2,
c1_0=3,
fixed={"c1_0": True, "c0_1": True},
bounds={"c0_0": (-10, 10)},
)
]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
"compound": astmodels.Shift(1) & astmodels.Shift(2)
| astmodels.Sky2Pix_TAN()
| astmodels.Rotation2D()
| astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32])
+ astmodels.Rotation2D(32)
}
assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {"rotation": rotation, "real_rotation": real_rotation}
def check(ff):
assert ff.tree["rotation"].inverse.angle == 45
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize("model", test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version("2.6.0"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
tree = {"single_model": model}
assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree["rot"].name == "foo"
tree = {"rot": astmodels.Rotation2D(23, name="foo")}
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {"azp": astmodels.Sky2Pix_AZP(0.5, 0.3)}
assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree["model"].name == "compound_model"
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename("compound_model")
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
@pytest.mark.slow
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
"forward": util.resolve_name(
f"astropy.modeling.projections.Sky2Pix_{name}"
)(),
"backward": util.resolve_name(
f"astropy.modeling.projections.Pix2Sky_{name}"
)(),
}
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.0)},
tmpdir,
init_options={"version": standard_version},
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize(
"model",
[
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2,
c0_0=3,
c1_0=5,
c0_1=7,
x_domain=[-2, 2],
y_domain=[-4, 4],
x_window=[-6, 6],
y_window=[-8, 8],
),
],
)
def test_polynomial(tmpdir, standard_version, model):
assert_roundtrip_tree(
{"model": model}, tmpdir, init_options={"version": standard_version}
)
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(
1, 1, c0_0=1, c0_1=2, c1_0=3, x_domain=[-2, 2], y_domain=[-2, 2]
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_domain.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(
2, c0=2, c1=3, c2=0.5, domain=[-2, 2], window=[-0.5, 0.5]
)
model2d = astmodels.Chebyshev2D(
1,
1,
c0_0=1,
c0_1=2,
c1_0=3,
x_domain=[-2, 2],
y_domain=[-2, 2],
x_window=[-0.5, 0.5],
y_window=[-0.1, 0.5],
)
fa = AsdfFile()
fa.tree["model1d"] = model1d
fa.tree["model2d"] = model2d
file_path = str(tmpdir.join("orthopoly_window.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model1d"](1.8) == model1d(1.8)
assert f.tree["model2d"](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1 * u.nm, 1 * (u.nm / u.pixel))
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1.0, 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(
points,
lookup_table=table,
bounds_error=False,
fill_value=None,
method="nearest",
)
tree = {"model": model2}
assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version("2.5.1"):
warnings.filterwarnings("ignore", "Unable to locate schema file")
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
"compound": fix_inputs(model, {"x": 45}),
"compound1": fix_inputs(model, {0: 45}),
}
assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {"compound": fix_inputs(3, {"x": 45})}
assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {"compound": astmodels.Pix2Sky_TAN() & {"x": 45}}
assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(
"model",
[
astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1),
],
)
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree["model"] = model
file_path = str(tmpdir.join("custom_and_analytical_inverse.asdf"))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree["model"].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
# Models and compound models that carry input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1 * u.kg)
m1.input_units_equivalencies = {"x": u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10 * u.Hz)
m2.input_units_equivalencies = {
"x": u.dimensionless_angles(),
"y": u.dimensionless_angles(),
}
    # 2D model with an equivalency for only one input
m3 = astmodels.Const2D(10 * u.Hz)
m3.input_units_equivalencies = {"x": u.dimensionless_angles()}
    # model using an equivalency whose arguments themselves carry units
m4 = astmodels.PowerLaw1D(amplitude=1 * u.m, x_0=10 * u.pix, alpha=7)
m4.input_units_equivalencies = {
"x": u.equivalencies.pixel_scale(0.5 * u.arcsec / u.pix)
}
return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10 * u.K, 11 * u.arcsec, 12 * u.arcsec)
m1.input_units_equivalencies = {"x": u.parallax()}
m2 = astmodels.Gaussian1D(5 * u.s, 2 * u.K, 3 * u.K)
m2.input_units_equivalencies = {"x": u.temperature()}
return [m1 | m2, m1 & m2, m1 + m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|
a750409b8f35363589df35227fcf9f24c5cf5b32908f0ac24962ca63920a95ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.time import Time, TimeDelta
@pytest.mark.parametrize("fmt", TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
t1 = Time(Time.now())
t2 = Time(Time.now())
td = TimeDelta(t2 - t1, format=fmt)
tree = dict(timedelta=td)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("scale", list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
tree = dict(timedelta=TimeDelta(0.125, scale=scale, format="jd"))
assert_roundtrip_tree(tree, tmpdir)
def test_timedelta_vector(tmpdir):
tree = dict(timedelta=TimeDelta([1, 2] * u.day))
assert_roundtrip_tree(tree, tmpdir)
|
a4a2972bcef20fffa8a0d566fdef5c435da0721d58770792a8e5f5d279801b69 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
import datetime
import asdf.schema as asdf_schema
import numpy as np
from asdf import AsdfFile, tagged, yamlutil
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import time
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault("items", [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == "items":
cursor = cursor.setdefault("items", dict())
else:
cursor = cursor.setdefault("properties", dict())
if i < len(path) - 1 and isinstance(path[i + 1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(np.arange(100), format="unix")
tree = {"large_time_array": time_array}
assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = time.Time([1, 2], location=location, format="cxcsec")
tree = {"time": t}
assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100 * u.m, y=0 * u.m, z=0 * u.m)
t = time.Time("J2000.000", location=location, format="jyear_str")
tree = {"time": t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
isot = time.Time("2000-01-01T00:00:00.000")
tree = {"time": isot}
assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
if isinstance(tree["time"], str):
assert str(tree["time"]) == isot.value
elif isinstance(tree["time"], dict):
assert str(tree["time"]["value"]) == isot.value
assert str(tree["time"]["base_format"]) == "isot"
else:
assert False
def test_isot_array(tmpdir):
tree = {"time": time.Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])}
assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
"http://stsci.edu/schemas/asdf/time/time-1.1.0", resolve_references=True
)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
tag = "tag:stsci.edu:asdf/time/time-1.1.0"
date = tagged.tag_object(tag, date)
tree = {"date": date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree["date"], asdf)
asdf_schema.validate(instance, schema=schema)
|
61ea781e5dfdbee01d33c3e88dcb963405f0624b33a165f8629e49e84a88f046 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import FK5, ICRS, Angle, Latitude, Longitude
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {"coord": ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {"coord": ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2] * units.deg, dec=[3, 4, 5] * units.deg)
tree = {"coord": icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {"coord": FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
dc17d0c9f029a45ccd4ee4f60eade2b9dae64d4ae25b38b07fe01a73fefec09b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.coordinates.angles import Latitude, Longitude
from astropy.coordinates.earth import ELLIPSOIDS, EarthLocation
@pytest.fixture
def position():
lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45], u.deg, wrap_angle=180 * u.deg
)
lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(
lat=34.4900 * u.deg, lon=-104.221800 * u.deg, height=40 * u.km
)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation_site(tmpdir):
orig_sites = getattr(EarthLocation, "_site_registry", None)
try:
EarthLocation._get_site_registry(force_builtin=True)
rog = EarthLocation.of_site("greenwich")
tree = dict(location=rog)
assert_roundtrip_tree(tree, tmpdir)
finally:
EarthLocation._site_registry = orig_sites
|
1324c8911a5ee89603d5d782bc0bf8b66e13b7e5f80788f28b3020ffe8f84691 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import FK4, ICRS, Galactic, Longitude, SkyCoord
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize(
"coord",
[
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s"),
],
)
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame="icrs")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime="2001-01-02T12:34:56")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime="J2010.11", equinox="B1965") # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(
w=0, u=1, v=2, unit="kpc", frame="galactic", representation_type="cartesian"
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1 * u.deg, dec=2 * u.deg), ICRS(ra=3 * u.deg, dec=4 * u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1 * u.deg, dec=2 * u.deg, radial_velocity=10 * u.km / u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason="Velocities are not properly serialized yet")
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=2 * u.mas / u.yr,
pm_dec=1 * u.mas / u.yr,
)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason="Apparent loss of precision during serialization")
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10 * u.deg, 20 * u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile["coord"], "equinox")
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
    sc = SkyCoord(
        [1, 2],
        [3, 4],
        [5, 6],
        unit="deg,deg,m",
        frame="fk4",
        obstime=["J1990.5", "J1991.5"],
    )
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
5f06853d868b0c25d64dccd0a6cf7688106a104eb97ea3b294647566e0eae533 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
import astropy.units as u
from astropy.coordinates import Angle, Latitude, Longitude
def test_angle(tmpdir):
tree = {"angle": Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {"angle": Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {"angle": Longitude(-100, u.deg, wrap_angle=180 * u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
fa7df2facf1f7ba7bcd1889d51b68fdcafbbaa1d66e5877f29eecffdea31371e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
from numpy.random import randint, random
import astropy.coordinates.representation as r
import astropy.units as u
from astropy.coordinates import Angle
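# Parametrized over every representation and differential class listed in
# ``r.__all__`` whose name does not contain "Base", instantiated with
# random-length arrays: degrees for Angle-typed components, km otherwise.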
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {"representation": representation}
assert_roundtrip_tree(tree, tmpdir)
|
212932dfe764083d6bf69f5f3f702d0ae1ffdb9ea4427daf090e9e6a71b40261 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
from astropy import units as u
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.tests.helper import assert_quantity_allclose
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
def test_scalar_spectralcoord(tmpdir):
sc = SpectralCoord(565 * u.nm)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 565 * u.nm)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_vector_spectralcoord(tmpdir):
sc = SpectralCoord([100, 200, 300] * u.GHz)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(
asdffile["spectralcoord"].quantity, [100, 200, 300] * u.GHz
)
assert_roundtrip_tree(
tree, tmpdir, asdf_check_func=check, tree_match_func=assert_quantity_allclose
)
@pytest.mark.filterwarnings("ignore:No velocity")
def test_spectralcoord_with_obstarget(tmpdir):
sc = SpectralCoord(
10 * u.GHz,
observer=ICRS(1 * u.km, 2 * u.km, 3 * u.km, representation_type="cartesian"),
target=Galactic(10 * u.deg, 20 * u.deg, distance=30 * u.pc),
)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile["spectralcoord"], SpectralCoord)
assert_quantity_allclose(asdffile["spectralcoord"].quantity, 10 * u.GHz)
assert isinstance(asdffile["spectralcoord"].observer, ICRS)
assert isinstance(asdffile["spectralcoord"].target, Galactic)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
|
e52ad8c178a1f68471a8b93516a0d20fb73b35fab7a4e74a28b3445420c11a6a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import yaml_to_asdf
from astropy import units as u
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree["unit"].is_equivalent(u.Ry)
|
438593a2b4d11e8c5966dc613e9362003b6598b980bea5641122d614c8295060 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import warnings
import pytest
asdf = pytest.importorskip("asdf")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import yaml_to_asdf
from astropy import units
def roundtrip_quantity(yaml, quantity):
buff = yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree["quantity"] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree["quantity"] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = f"""
quantity: !unit/quantity-1.1.0
value: {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x * 2.3081 for x in range(10)]
testunit = units.ampere
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1, 2, 3], [4, 5, 6]]
testunit = units.km
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{testval}
unit: {testunit}
"""
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
dcd31db00786a2ab1d6887e5708ce2497197b2e08a9af75038a8a3a9713c6f10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import pytest
from astropy import units as u
from astropy.cosmology import Planck15
from astropy.cosmology.units import with_H0
from astropy.units import equivalencies as eq
asdf = pytest.importorskip("asdf", minversion="2.3.0.dev0")
from asdf.exceptions import AsdfDeprecationWarning
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=AsdfDeprecationWarning,
message=r"asdf.tests.helpers is deprecated.*",
)
from asdf.tests.helpers import assert_roundtrip_tree
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [
eq.plate_scale(0.3 * u.deg / u.mm),
eq.pixel_scale(0.5 * u.deg / u.pix),
eq.pixel_scale(100.0 * u.pix / u.cm),
eq.spectral_density(350 * u.nm, factor=2),
eq.spectral_density(350 * u.nm),
eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.temperature_energy(),
eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr),
eq.mass_energy(),
eq.molar_mass_amu(),
eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm),
eq.doppler_radio(2 * u.Hz),
eq.parallax(),
eq.logarithmic(),
eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(
eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
(
eq.spectral()
+ eq.spectral_density(35 * u.nm)
+ eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)
),
with_H0(),
]
@pytest.mark.parametrize("equiv", get_equivalencies())
@pytest.mark.filterwarnings(
"ignore:`with_H0` is deprecated from `astropy.units.equivalencies` "
"since astropy 5.0 and may be removed in a future version. "
"Use `astropy.cosmology.units.with_H0` instead."
)
def test_equivalencies(tmpdir, equiv):
tree = {"equiv": equiv}
assert_roundtrip_tree(tree, tmpdir)
|
ef58f28fb6be9567af315037adecd0786ca9b4d49e687e15ea4391dd38d2b5a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import gzip
import io
import pathlib
import sys
from unittest import mock
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_array_equal
from astropy.io.votable import tree
from astropy.io.votable.exceptions import W39, VOTableSpecError, VOWarning
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, "float_repr_style"):
legacy_float_repr = sys.float_repr_style == "legacy"
else:
legacy_float_repr = sys.platform.startswith("win")
def assert_validate_schema(filename, version):
if sys.platform.startswith("win"):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, "File did not validate against VOTable schema"
def test_parse_single_table():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table = parse_single_table(get_pkg_data_filename("data/regression.xml"))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table2 = parse_single_table(
get_pkg_data_filename("data/regression.xml"), table_number=1
)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename("data/regression.xml"), table_number=3)
def _test_regression(tmp_path, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename("data/regression.xml"),
_debug_python_based_parser=_python_based,
)
table = votable.get_first_table()
dtypes = [
(("string test", "string_test"), "|O8"),
(("fixed string test", "string_test_2"), "<U10"),
("unicode_test", "|O8"),
(("unicode test", "fixed_unicode_test"), "<U10"),
(("string array test", "string_array_test"), "<U4"),
("unsignedByte", "|u1"),
("short", "<i2"),
("int", "<i4"),
("long", "<i8"),
("double", "<f8"),
("float", "<f4"),
("array", "|O8"),
("bit", "|b1"),
("bitarray", "|b1", (3, 2)),
("bitvararray", "|O8"),
("bitvararray2", "|O8"),
("floatComplex", "<c8"),
("doubleComplex", "<c16"),
("doubleComplexArray", "|O8"),
("doubleComplexArrayFixed", "<c16", (2,)),
("boolean", "|b1"),
("booleanArray", "|b1", (4,)),
("nulls", "<i4"),
("nulls_array", "<i4", (2, 2)),
("precision1", "<f8"),
("precision2", "<f8"),
("doublearray", "|O8"),
("bitarray2", "|b1", (16,)),
]
if sys.byteorder == "big":
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace("<", ">")
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(
str(tmp_path / "regression.tabledata.xml"),
_debug_python_based_parser=_python_based,
)
assert_validate_schema(str(tmp_path / "regression.tabledata.xml"), votable.version)
if binary_mode == 1:
votable.get_first_table().format = "binary"
votable.version = "1.1"
elif binary_mode == 2:
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
votable.version = "1.3"
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmp_path / "regression.binary.xml"), votable.version)
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = "tabledata"
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
assert_validate_schema(
str(tmp_path / "regression.bin.tabledata.xml"), votable.version
)
with open(
get_pkg_data_filename(
f"data/regression.bin.tabledata.truth.{votable.version}.xml"
),
encoding="utf-8",
) as fd:
truth = fd.readlines()
with open(str(tmp_path / "regression.bin.tabledata.xml"), encoding="utf-8") as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml.gz"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
with gzip.GzipFile(str(tmp_path / "regression.bin.tabledata.xml.gz"), "rb") as gzfd:
output = gzfd.readlines()
output = [x.decode("utf-8").rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail("legacy_float_repr")
def test_regression(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_python_based_parser(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, True)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_binary2(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False, 2)
class TestFixups:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename("data/regression.xml")
).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array["string_test_2"], self.array["fixed string test"])
class TestReferences:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == "boolean"
assert fieldref.get_ref().datatype == "boolean"
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == "INPUT"
assert paramref.get_ref().datatype == "float"
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
columns = ["string_test", "unsignedByte", "bitarray"]
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
def test_select_columns_by_name():
columns = ["string_test", "unsignedByte", "bitarray"]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
class TestParse:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array["string_test"].dtype.type, np.object_)
assert_array_equal(
self.array["string_test"],
["String & test", "String & test", "XXXX", "", ""],
)
def test_fixed_string_test(self):
assert issubclass(self.array["string_test_2"].dtype.type, np.unicode_)
assert_array_equal(
self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""]
)
def test_unicode_test(self):
assert issubclass(self.array["unicode_test"].dtype.type, np.object_)
assert_array_equal(
self.array["unicode_test"],
["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""],
)
def test_fixed_unicode_test(self):
assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.unicode_)
assert_array_equal(
self.array["fixed_unicode_test"],
["Ceçi n'est", "வணக்கம்", "0123456789", "", ""],
)
def test_unsignedByte(self):
assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8)
assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255])
assert not np.any(self.mask["unsignedByte"])
def test_short(self):
assert issubclass(self.array["short"].dtype.type, np.int16)
assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask["short"])
def test_int(self):
assert issubclass(self.array["int"].dtype.type, np.int32)
assert_array_equal(
self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789]
)
assert_array_equal(self.mask["int"], [False, False, False, False, True])
def test_long(self):
assert issubclass(self.array["long"].dtype.type, np.int64)
assert_array_equal(
self.array["long"],
[
922337203685477,
123456789,
-1152921504606846976,
1152921504606846975,
123456789,
],
)
assert_array_equal(self.mask["long"], [False, True, False, False, True])
def test_double(self):
assert issubclass(self.array["double"].dtype.type, np.float64)
assert_array_equal(
self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf]
)
assert_array_equal(self.mask["double"], [False, False, False, True, False])
def test_float(self):
assert issubclass(self.array["float"].dtype.type, np.float32)
assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask["float"], [False, False, False, False, True])
def test_array(self):
assert issubclass(self.array["array"].dtype.type, np.object_)
match = [
[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]],
]
for a, b in zip(self.array["array"], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data["array"][3].mask[0][0]
assert self.array.data["array"][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array["bit"].dtype.type, np.bool_)
assert_array_equal(self.array["bit"], [True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array["bitarray"].dtype.type, np.bool_)
assert self.array["bitarray"].shape == (5, 3, 2)
assert_array_equal(
self.array["bitarray"],
[
[[True, False], [True, True], [False, True]],
[[False, True], [False, False], [True, True]],
[[True, True], [True, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
],
)
def test_bitarray_mask(self):
assert_array_equal(
self.mask["bitarray"],
[
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[True, True], [True, True], [True, True]],
[[True, True], [True, True], [True, True]],
],
)
def test_bitvararray(self):
assert issubclass(self.array["bitvararray"].dtype.type, np.object_)
match = [
[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[],
[],
]
for a, b in zip(self.array["bitvararray"], match):
assert_array_equal(a, b)
match_mask = [
[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False,
False,
]
for a, b in zip(self.array["bitvararray"], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array["bitvararray2"].dtype.type, np.object_)
match = [
[],
[
[[False, True], [False, False], [True, False]],
[[True, False], [True, False], [True, False]],
],
[[[True, True], [True, True], [True, True]]],
[],
[],
]
for a, b in zip(self.array["bitvararray2"], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array["floatComplex"].dtype.type, np.complex64)
assert_array_equal(
self.array["floatComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j],
)
assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128)
assert_array_equal(
self.array["doubleComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j],
)
assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_)
assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0]
def test_boolean(self):
assert issubclass(self.array["boolean"].dtype.type, np.bool_)
assert_array_equal(self.array["boolean"], [True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask["boolean"], [False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array["booleanArray"].dtype.type, np.bool_)
assert_array_equal(
self.array["booleanArray"],
[
[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False],
],
)
def test_boolean_array_mask(self):
assert_array_equal(
self.mask["booleanArray"],
[
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True],
],
)
def test_nulls(self):
assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9])
assert_array_equal(self.mask["nulls"], [False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(
self.array["nulls_array"],
[
[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]],
],
)
assert_array_equal(
self.mask["nulls_array"],
[
[[True, True], [True, True]],
[[False, False], [False, False]],
[[True, False], [True, False]],
[[False, True], [False, True]],
[[True, True], [True, True]],
],
)
def test_double_array(self):
assert issubclass(self.array["doublearray"].dtype.type, np.object_)
assert len(self.array["doublearray"][0]) == 0
assert_array_equal(
self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1]
)
assert_array_equal(
self.array.data["doublearray"][1].mask,
[False, False, False, False, False, False, True],
)
def test_bit_array2(self):
assert_array_equal(
self.array["bitarray2"][0],
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
True,
True,
False,
False,
False,
False,
],
)
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"][0])
assert np.all(self.mask["bitarray2"][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id("J2000")
assert coosys.system == "eq_FK5"
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id("QUERY_STATUS")
assert info.value == "OK"
if self.votable.version != "1.1":
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..."
def test_repr(self):
assert "3 tables" in repr(self.votable)
assert (
repr(list(self.votable.iter_fields_and_params())[0])
== '<PARAM ID="awesome" arraysize="*" datatype="float" '
'name="INPUT" unit="deg" value="[0.0 0.0]"/>'
)
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == "[</>]"
class TestThroughTableData(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
def test_schema(self, tmp_path):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = tmp_path / "test_through_tabledata.xml"
with open(fn, "wb") as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, "1.1")
class TestThroughBinary(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.get_first_table().format = "binary"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask["bit"])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
class TestThroughBinary2(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.version = "1.3"
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
    # Now write the whole thing out (here to an in-memory buffer rather than a file).
    # Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
# https://github.com/astropy/astropy/issues/13341
@np.errstate(over="ignore")
def test_open_files():
for filename in get_pkg_data_filenames("data", pattern="*.xml"):
if filename.endswith("custom_datatype.xml") or filename.endswith(
"timesys_errors.xml"
):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename("data/too_many_columns.xml.gz"))
def test_build_from_scratch(tmp_path):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
tree.Field(
votable, ID="filename", name="filename", datatype="char", arraysize="1"
),
tree.Field(
votable, ID="matrix", name="matrix", datatype="double", arraysize="2x2"
),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmp_path / "new_votable.xml"))
votable = parse(str(tmp_path / "new_votable.xml"))
table = votable.get_first_table()
assert_array_equal(
table.array.mask,
np.array(
[
(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]]),
],
dtype=[("filename", "?"), ("matrix", "?", (2, 2))],
),
)
def test_validate(test_path_object=False):
"""
    ``test_path_object`` is used by ``test_validate_path_object`` below so that
    the file can also be passed as a ``pathlib.Path`` object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename("data/regression.xml")
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
@mock.patch("subprocess.Popen")
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {"communicate.return_value": ("ok", "ko"), "returncode": 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename("data/empty_table.xml"), xmllint=True)
def test_validate_path_object():
"""Validating when source is passed as path object (#4412)."""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(tmp_path / "regression.compressed.xml", "wb") as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(tmp_path / "regression.compressed.xml", "rb") as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == "test1.xml"
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename("data/regression.xml")
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == "win32":
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename("data/nonstandard_units.xml"))
assert isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename("data/nonstandard_units.xml"), unit_format="generic"
)
assert not isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = "t2"
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(
get_pkg_data_filename("data/no_resource.xml"), output, xmllint=False
)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/no_resource.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename("data/custom_datatype.xml"),
datatype_mapping={"bar": "int"},
)
table = votable.get_first_table()
assert table.array.dtype["foo"] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id("time_frame")
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == "TCB"
assert timesys.refposition == "BARYCENTER"
timesys = votable.get_timesys_by_id("mjd_origin")
assert timesys.timeorigin == "MJD-origin"
assert timesys.timescale == "TDB"
assert timesys.refposition == "EMBARYCENTER"
timesys = votable.get_timesys_by_id("jd_origin")
assert timesys.timeorigin == "JD-origin"
assert timesys.timescale == "TT"
assert timesys.refposition == "HELIOCENTER"
timesys = votable.get_timesys_by_id("no_origin")
assert timesys.timeorigin is None
assert timesys.timescale == "UTC"
assert timesys.refposition == "TOPOCENTER"
def test_timesys():
votable = parse(get_pkg_data_filename("data/timesys.xml"))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename("data/timesys.xml"))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename("data/timesys_errors.xml"), output, xmllint=False)
outstr = output.getvalue()
assert "E23: Invalid timeorigin attribute 'bad-origin'" in outstr
assert "E22: ID attribute is required for all TIMESYS elements" in outstr
assert "W48: Unknown attribute 'refposition_mispelled' on TIMESYS" in outstr
def test_get_infos_by_name():
vot = parse(
io.BytesIO(
b"""
<VOTABLE xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.4">
<RESOURCE type="results">
<INFO name="creator-name" value="Cannon, A."/>
<INFO name="creator-name" value="Fleming, W."/>
</RESOURCE>
</VOTABLE>"""
)
)
infos = vot.get_infos_by_name("creator-name")
assert [i.value for i in infos] == ["Cannon, A.", "Fleming, W."]
|
6934a65848501f3f66d3b9a9c328d5104b4173bdecda833386f703d2755d8692 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table.
"""
import io
import os
import pathlib
import numpy as np
import pytest
from astropy.config import reload_config, set_temp_config
from astropy.io.votable import conf, from_table, is_votable, tree, validate
from astropy.io.votable.exceptions import E25, W39, VOWarning
from astropy.io.votable.table import parse, writeto
from astropy.table import Column, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
from astropy.utils.data import (
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
@pytest.fixture
def home_is_data(monkeypatch):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path("data")
# For Unix
monkeypatch.setenv("HOME", path)
# For Windows
monkeypatch.setenv("USERPROFILE", path)
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def test_table(tmp_path):
# Read the VOTABLE
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
("string_test", {"datatype": "char", "arraysize": "*"}),
("string_test_2", {"datatype": "char", "arraysize": "10"}),
("unicode_test", {"datatype": "unicodeChar", "arraysize": "*"}),
("fixed_unicode_test", {"datatype": "unicodeChar", "arraysize": "10"}),
("string_array_test", {"datatype": "char", "arraysize": "4"}),
("unsignedByte", {"datatype": "unsignedByte"}),
("short", {"datatype": "short"}),
("int", {"datatype": "int"}),
("long", {"datatype": "long"}),
("double", {"datatype": "double"}),
("float", {"datatype": "float"}),
("array", {"datatype": "long", "arraysize": "2*"}),
("bit", {"datatype": "bit"}),
("bitarray", {"datatype": "bit", "arraysize": "3x2"}),
("bitvararray", {"datatype": "bit", "arraysize": "*"}),
("bitvararray2", {"datatype": "bit", "arraysize": "3x2*"}),
("floatComplex", {"datatype": "floatComplex"}),
("doubleComplex", {"datatype": "doubleComplex"}),
("doubleComplexArray", {"datatype": "doubleComplex", "arraysize": "*"}),
("doubleComplexArrayFixed", {"datatype": "doubleComplex", "arraysize": "2"}),
("boolean", {"datatype": "bit"}),
("booleanArray", {"datatype": "bit", "arraysize": "4"}),
("nulls", {"datatype": "int"}),
("nulls_array", {"datatype": "int", "arraysize": "2x2"}),
("precision1", {"datatype": "double"}),
("precision2", {"datatype": "double"}),
("doublearray", {"datatype": "double", "arraysize": "*"}),
("bitarray2", {"datatype": "bit", "arraysize": "16"}),
]
for field, (name, d) in zip(t.fields, field_types):
assert field.ID == name
assert (
field.datatype == d["datatype"]
        ), f'{name} expected {d["datatype"]} but got {field.datatype}'
if "arraysize" in d:
assert field.arraysize == d["arraysize"]
# W39: Bit values can not be masked
with pytest.warns(W39):
writeto(votable2, str(tmp_path / "through_table.xml"))
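# A compact illustration (editorial addition, comment-only) of the Column -> FIELD
# datatype mapping exercised by ``field_types`` above, using a trivial table:
#
#     t = Table({"x": np.array([1, 2], dtype=np.int16)})
#     vot = tree.VOTableFile.from_table(t)
#     vot.get_first_table().fields[0].datatype   # -> "short"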
def test_read_from_tilde_path(home_is_data):
# Just test that these run without error for tilde-paths
path = os.path.join("~", "regression.xml")
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(path)
Table.read(path, format="votable", table_id="main_table")
def test_read_through_table_interface(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="main_table")
assert len(t) == 5
# Issue 8354
assert t["float"].format is None
fn = tmp_path / "table_interface.xml"
# W39: Bit values can not be masked
with pytest.warns(W39):
t.write(fn, table_id="FOO", format="votable")
with open(fn, "rb") as fd:
t2 = Table.read(fd, format="votable", table_id="FOO")
assert len(t2) == 5
def test_read_through_table_interface2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="last_table")
assert len(t) == 0
def test_pass_kwargs_through_table_interface():
# Table.read() should pass on keyword arguments meant for parse()
filename = get_pkg_data_filename("data/nonstandard_units.xml")
t = Table.read(filename, format="votable", unit_format="generic")
assert t["Flux1"].unit == Unit("erg / (Angstrom cm2 s)")
def test_names_over_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
"Name",
"GLON",
"GLAT",
"RAdeg",
"DEdeg",
"Jmag",
"Hmag",
"Kmag",
"G3.6mag",
"G4.5mag",
"G5.8mag",
"G8.0mag",
"4.5mag",
"8.0mag",
"Emag",
"24mag",
"f_Name",
]
def test_explicit_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
"col1",
"col2",
"col3",
"col4",
"col5",
"col6",
"col7",
"col8",
"col9",
"col10",
"col11",
"col12",
"col13",
"col14",
"col15",
"col16",
"col17",
]
def test_table_read_with_unnamed_tables():
"""
Issue #927.
"""
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable")
assert len(t) == 1
def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename("data/names.xml"))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
output = io.BytesIO()
t.write(output, format="votable")
def test_write_with_format():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY" in obuff
assert b"TABLEDATA" not in obuff
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY2" in obuff
assert b"TABLEDATA" not in obuff
def test_write_overwrite(tmp_path):
t = simple_table(3, 3)
filename = tmp_path / "overwrite_test.vot"
t.write(filename, format="votable")
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format="votable")
t.write(filename, format="votable", overwrite=True)
def test_write_tilde_path(home_is_tmpdir):
fname = os.path.join("~", "output")
t = Table()
t["a"] = [1, 2, 3]
t.write(fname, format="votable", tabledata_format="binary")
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" in obuff
assert "TABLEDATA" not in obuff
@pytest.mark.parametrize("path_format", ["plain", "tilde"])
def test_writeto(path_format, tmp_path, home_is_tmpdir):
if path_format == "plain":
# pathlib.Path objects are not accepted by votable.writeto, so convert
# to a string
fname = str(tmp_path / "writeto_test.vot")
else:
fname = os.path.join("~", "writeto_test.vot")
t = Table()
t["a"] = [1, 2, 3]
vt = from_table(t)
writeto(vt, fname)
if path_format == "tilde":
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" not in obuff
assert "TABLEDATA" in obuff
def test_empty_table():
votable = parse(get_pkg_data_filename("data/empty_table.xml"))
table = votable.get_first_table()
table.to_table()
def test_no_field_not_empty_table():
votable = parse(get_pkg_data_filename("data/no_field_not_empty_table.xml"))
table = votable.get_first_table()
assert len(table.fields) == 0
assert len(table.infos) == 1
def test_no_field_not_empty_table_exception():
with pytest.raises(E25):
parse(
get_pkg_data_filename("data/no_field_not_empty_table.xml"),
verify="exception",
)
def test_binary2_masked_strings():
"""
Issue #8995.
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename("data/binary2_masked_strings.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask["epoch_photometry_url"])
output = io.BytesIO()
astropy_table.write(output, format="votable")
def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename("data/regression.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False
def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable.
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename("data/valid_votable.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True
def test_validate_tilde_path(home_is_data):
validate(os.path.join("~", "valid_votable.xml"))
def test_is_votable_tilde_path(home_is_data):
assert is_votable(os.path.join("~", "valid_votable.xml"))
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
parse(get_pkg_data_filename("data/gemini.xml"))
# Then try the various explicit options
def test_verify_ignore(self):
parse(get_pkg_data_filename("data/gemini.xml"), verify="ignore")
def test_verify_warn(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"), verify="warn")
assert len(w) == 24
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"), verify="exception")
# Make sure the deprecated pedantic option still works for now
def test_pedantic_false(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"), pedantic=False)
assert len(w) == 25
def test_pedantic_true(self):
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"), pedantic=True)
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp("verify", "ignore"):
parse(get_pkg_data_filename("data/gemini.xml"))
def test_conf_verify_warn(self):
with conf.set_temp("verify", "warn"):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"))
assert len(w) == 24
def test_conf_verify_exception(self):
with conf.set_temp("verify", "exception"):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"))
# And make sure the old configuration item will keep working
def test_conf_pedantic_false(self, tmp_path):
with set_temp_config(tmp_path):
with open(tmp_path / "astropy" / "astropy.cfg", "w") as f:
f.write("[io.votable]\npedantic = False")
reload_config("astropy.io.votable")
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"))
assert len(w) == 25
def test_conf_pedantic_true(self, tmp_path):
with set_temp_config(tmp_path):
with open(tmp_path / "astropy" / "astropy.cfg", "w") as f:
f.write("[io.votable]\npedantic = True")
reload_config("astropy.io.votable")
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"))
|
6ba4b00ffe947eff9b2a39fd3f8384aebd4621d25bbee4cdb5a4ad3796f57bc6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Validates a large collection of web-accessible VOTable files,
and generates a report as a directory tree of HTML files.
"""
# STDLIB
import os
# LOCAL
from astropy.utils.data import get_pkg_data_filename
from . import html, result
__all__ = ["make_validation_report"]
def get_srcdir():
return os.path.dirname(__file__)
def get_urls(destdir, s):
import gzip
types = ["good", "broken", "incorrect"]
seen = set()
urls = []
for type in types:
filename = get_pkg_data_filename(f"data/urls/cone.{type}.dat.gz")
with gzip.open(filename, "rb") as fd:
for url in fd.readlines():
next(s)
url = url.strip()
if url not in seen:
with result.Result(url, root=destdir) as r:
r["expected"] = type
urls.append(url)
seen.add(url)
return urls
def download(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.download_xml_content()
def validate_vo(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.validate_vo()
def votlint_validate(args):
path_to_stilts_jar, url, destdir = args
with result.Result(url, root=destdir) as r:
if r["network_error"] is None:
r.validate_with_votlint(path_to_stilts_jar)
def write_html_result(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
html.write_result(r)
def write_subindex(args):
subset, destdir, total = args
html.write_index_table(destdir, *subset, total=total)
def make_validation_report(
urls=None,
destdir="astropy.io.votable.validator.results",
multiprocess=True,
stilts=None,
):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of str, optional
        If provided, a list of HTTP URLs of VOTable files to download and
        validate.  If not provided, a built-in set of ~22,000 URLs
        compiled by HEASARC will be used.
destdir : path-like, optional
        The directory to write the report to.  By default, this is a
        directory called ``'astropy.io.votable.validator.results'`` in the
        current directory.  If the directory does not exist, it will be created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path-like, optional
        To perform validation with ``votlint`` from the Java-based `STILTS`_
VOTable parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
"""
from astropy.utils.console import ProgressBar, Spinner, color_print
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(f"{stilts} does not exist.")
destdir = os.path.expanduser(destdir)
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner("Loading URLs", "green") as s:
urls = get_urls(destdir, s)
else:
urls = [url.encode() for url in urls if isinstance(url, str)]
color_print("Marking URLs", "green")
for url in ProgressBar(urls):
with result.Result(url, root=destdir) as r:
r["expected"] = type
args = [(url, destdir) for url in urls]
color_print("Downloading VO files", "green")
ProgressBar.map(download, args, multiprocess=multiprocess)
color_print("Validating VO files", "green")
ProgressBar.map(validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print("Validating with votlint", "green")
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(votlint_validate, votlint_args, multiprocess=multiprocess)
color_print("Generating HTML files", "green")
ProgressBar.map(write_html_result, args, multiprocess=multiprocess)
with Spinner("Grouping results", "green") as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print("Generating index", "green")
html.write_index(subsets, urls, destdir)
color_print("Generating subindices", "green")
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(write_subindex, subindex_args, multiprocess=multiprocess)
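# A minimal usage sketch (editorial addition; the URL and paths below are hypothetical):
#
#     from astropy.io.votable.validator import make_validation_report
#
#     make_validation_report(
#         urls=["http://example.com/votable.xml"],  # hypothetical URL list
#         destdir="validation-results",             # report tree written here
#         multiprocess=False,                       # serial run is easier to debug
#         stilts="/path/to/stilts.jar",             # optional: also run votlint
#     )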
|
696e61915ca38a40e09013e9d3232910c837399192b8a1ee782c34c17705dc85 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**.
"""
# Import to register with the I/O machinery
from . import cosmology, ecsv, html, mapping, model, row, table, yaml
|
5fabad8337582bee0b45036a1851c5050eec09adf1aba0a5c95d7d4a22c9227d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import inspect
import pickle
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0,
1,
1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift,
3 * u.one, # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift, # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
    # Wrong units (the TypeError is for the Cython implementation, which can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
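# Editorial sketch (comment-only) of how the ``valid_zs`` / ``invalid_zs`` lists above
# are typically consumed by parametrized tests; the bodies here are placeholders:
#
#     @pytest.mark.parametrize("z", valid_zs)
#     def test_accepts_valid_z(z):
#         ...  # hypothetical: exercise a method that should accept ``z``
#
#     @pytest.mark.parametrize("z, exc", invalid_zs)
#     def test_rejects_invalid_z(z, exc):
#         with pytest.raises(exc):
#             ...  # hypothetical: exercise a method that should reject ``z``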
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
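# Quick illustration (editorial, comment-only): SubCosmology can be instantiated
# directly, e.g.
#
#     cosmo = SubCosmology(70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, name="example")
#     cosmo.H0       # -> Quantity of 70 km / (s Mpc)
#     cosmo.is_flat  # raises NotImplementedError (abstract on the base Cosmology)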
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class CosmologyTest(
ParameterTestMixin,
MetaTestMixin,
ReadWriteTestMixin,
ToFromFormatTestMixin,
metaclass=abc.ABCMeta,
):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
def teardown_class(self):
pass
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(
cosmo._init_signature.parameters.keys()
)
assert all(
np.all(sig.parameters[k].default == p.default)
for k, p in cosmo._init_signature.parameters.items()
)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
        # Cosmology only does name and meta, but this subclass adds H0, Tcmb0 & m_nu.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
match = (
"can't set"
if PYTHON_LT_3_11
else f"property 'name' of {cosmo.__class__.__name__!r} object has no setter"
)
with pytest.raises(AttributeError, match=match):
cosmo.name = None
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={"test_clone_name": True})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
        Test method ``.clone()`` changing one or more Parameters.
        Nothing is done here because the base Cosmology class has no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
        # different name => not equal, but equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1 :] # remove
# name in string rep
if cosmo.name is not None:
assert f'name="{cosmo.name}"' in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3 :] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = f"{k}={v}"
assert sv in r
assert r.index(k) == 0
r = r[len(sv) + 2 :] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class TestCosmology(CosmologyTest):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(
H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV
)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
"""Teardown for testing."""
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
# ===============================================================
# Method & Attribute Tests
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
The test suite structure mirrors the implementation of the tested code.
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract
base class (ABC) that cannot be used by itself, so too is this corresponding
test class an ABC mixin.
    E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_nonflat_class_(self, cosmo_cls, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`."""
# Test it's a method on the class
assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__)
# It also works from the instance. # TODO! as a "metaclassmethod"
assert issubclass(cosmo_cls, cosmo.__nonflatclass__)
# Maybe not the most robust test, but so far all Flat classes have the
# name of their parent class.
assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_nonflat(self, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`."""
assert cosmo.nonflat.is_equivalent(cosmo)
assert cosmo.is_equivalent(cosmo.nonflat)
# ------------------------------------------------
# clone
def test_clone_to_nonflat_equivalent(self, cosmo):
"""Test method ``.clone()``to_nonflat argument."""
# just converting the class
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
@abc.abstractmethod
def test_clone_to_nonflat_change_param(self, cosmo):
"""
        Test method ``.clone()`` changing one (or many) Parameter(s). No parameters
are changed here because FlatCosmologyMixin has no Parameters.
See class docstring for why this test method exists.
"""
# send to non-flat
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
# ------------------------------------------------
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologyTest.test_is_equivalent(self, cosmo)
# See FlatFLRWMixinTest for tests. It's a bit hard here since this class
# is for an ABC.
# ===============================================================
# Usage Tests
def test_subclassing(self, cosmo_cls):
"""Test when subclassing a flat cosmology."""
class SubClass1(cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__
# A more complex example is when Mixin classes are used.
class Mixin:
pass
class SubClass2(Mixin, cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__
# The order of the Mixin should not matter
class SubClass3(cosmo_cls, Mixin):
pass
# The classes have the same non-flat parent class
assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__
def test__nonflatclass__multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
class SubCosmology2(Cosmology):
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass
# -----------------------------------------------------------------------------
def test_flrw_moved_deprecation():
"""Test the deprecation warning about the move of FLRW classes."""
from astropy.cosmology import flrw
# it's deprecated to import `flrw/*` from `core.py`
with pytest.warns(AstropyDeprecationWarning):
from astropy.cosmology.core import FLRW
# but they are the same object
assert FLRW is flrw.FLRW
|
d370f4e0bfaf44054b67c5b93e591c5a73c9fe9ef0795d838219ff549e886396 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["w0wzCDM", "Flatw0wzCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class w0wzCDM(FLRW):
"""
FLRW cosmology with a variable dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the simple form:
:math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0wzCDM
>>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
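
    The equation of state is linear in redshift, :math:`w(z) = w_0 + w_z z`,
    and can be evaluated directly (an illustrative check; the commented values
    follow from simple arithmetic for this example model):

    >>> ws = cosmo.w([0.0, 0.5, 1.0])  # -0.9, -0.8, -0.7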
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wz = Parameter(
doc="Derivative of the dark energy equation of state w.r.t. z.",
fvalidate="float",
)
def __init__(
self,
H0,
Om0,
Ode0,
w0=-1.0,
wz=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.w0 = w0
self.wz = wz
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._w0,
self._wz,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0,
self._wz,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
self._wz,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
"""
return self._w0 + self._wz * aszarr(z)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
\exp \left(-3 w_z z\right)
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
return zp1 ** (3.0 * (1.0 + self._w0 - self._wz)) * exp(-3.0 * self._wz * z)
class Flatw0wzCDM(FlatFLRWMixin, w0wzCDM):
"""
FLRW cosmology with a variable dark energy equation of state and no curvature.
The equation for the dark energy equation of state uses the simple form:
:math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import Flatw0wzCDM
>>> cosmo = Flatw0wzCDM(H0=70, Om0=0.3, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> cosmo.comoving_distance(0.5)
<Quantity 1982.66012926 Mpc>
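
    In the flat case ``Ode0`` is not an input: it is fixed by the flatness
    condition (with the default ``Tcmb0=0`` it is simply ``1 - Om0``):

    >>> Ode0 = cosmo.Ode0  # 0.7 for this example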
"""
def __init__(
self,
H0,
Om0,
w0=-1.0,
wz=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=0.0,
w0=w0,
wz=wz,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wzcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._w0, self._wz)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wzcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0,
self._wz,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wzcdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._w0,
self._wz,
)
|
88092626ba196c98bc5fd9efe9b87a3c6ce35342d8dc13cbfe34cfc05566d192 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["wpwaCDM", "FlatwpwaCDM"]
__doctest_requires__ = {"*": ["scipy"]}
class wpwaCDM(FLRW):
r"""
FLRW cosmology with a CPL dark energy equation of state, a pivot redshift,
and curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to
have a pivot redshift as in the findings of the Dark Energy Task Force
    [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+z_p) - 1/(1+z) )`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
wp : float, optional
Dark energy equation of state at the pivot redshift zp. This is
pressure/density for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.
zp : float or quantity-like ['redshift'], optional
Pivot redshift -- the redshift where w(z) = wp
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wpwaCDM
>>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
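
    At the pivot redshift the equation of state equals ``wp`` by construction,
    an illustrative consequence of the pivoted CPL form above:

    >>> w_pivot = cosmo.w(0.4)  # equals wp = -0.9 at z = zp
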
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
.. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein,
D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., &
Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure
of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721.
"""
wp = Parameter(
doc="Dark energy equation of state at the pivot redshift zp.", fvalidate="float"
)
wa = Parameter(
doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float",
)
zp = Parameter(doc="The pivot redshift, where w(z) = wp.", unit=cu.redshift)
def __init__(
self,
H0,
Om0,
Ode0,
wp=-1.0,
wa=0.0,
zp=0.0 * cu.redshift,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=Ode0,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
self.wp = wp
self.wa = wa
self.zp = zp
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
apiv = 1.0 / (1.0 + self._zp.value)
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._wp,
apiv,
self._wa,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0 + self._Onu0,
self._wp,
apiv,
self._wa,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ok0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._wp,
apiv,
self._wa,
)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where
        :math:`a = 1/(1+z)` and :math:`a_p = 1/(1+z_p)`.
"""
apiv = 1.0 / (1.0 + self._zp.value)
return self._wp + self._wa * (apiv - 1.0 / (aszarr(z) + 1.0))
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
a_p = \frac{1}{1 + z_p}
I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
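
        The closed form above follows from the general relation
        :math:`I = \exp\left(3 \int_0^z \frac{1 + w(z')}{1 + z'}\, dz'\right)`,
        evaluated with the pivoted CPL :math:`w(z)` defined in :meth:`w`
        (a short derivation note, not an additional definition).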
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
apiv = 1.0 / (1.0 + self._zp.value)
return zp1 ** (3.0 * (1.0 + self._wp + apiv * self._wa)) * exp(
-3.0 * self._wa * z / zp1
)
class FlatwpwaCDM(FlatFLRWMixin, wpwaCDM):
r"""
FLRW cosmology with a CPL dark energy equation of state, a pivot redshift,
and no curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to
have a pivot redshift as in the findings of the Dark Energy Task Force
    [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+z_p) - 1/(1+z) )`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
wp : float, optional
Dark energy equation of state at the pivot redshift zp. This is
pressure/density for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.
zp : float or quantity-like ['redshift'], optional
Pivot redshift -- the redshift where w(z) = wp
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatwpwaCDM
>>> cosmo = FlatwpwaCDM(H0=70, Om0=0.3, wp=-0.9, wa=0.2, zp=0.4)
The comoving distance in Mpc at redshift z:
>>> cosmo.comoving_distance(0.5)
<Quantity 1868.68474438 Mpc>
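
    The pivot redshift is stored as a `~astropy.units.Quantity` with the
    dimensionless ``redshift`` unit, even when passed as a plain float
    (an illustrative note; see the ``zp`` Parameter definition in ``wpwaCDM``):

    >>> zp = cosmo.zp  # 0.4 redshift
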
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
.. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein,
D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., &
Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure
of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721.
"""
def __init__(
self,
H0,
Om0,
wp=-1.0,
wa=0.0,
zp=0.0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None
):
super().__init__(
H0=H0,
Om0=Om0,
Ode0=0.0,
wp=wp,
wa=wa,
zp=zp,
Tcmb0=Tcmb0,
Neff=Neff,
m_nu=m_nu,
Ob0=Ob0,
name=name,
meta=meta,
)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
        apiv = 1.0 / (1.0 + self._zp.value)
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fwpwacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._wp,
apiv,
self._wa,
)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fwpwacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0 + self._Onu0,
self._wp,
apiv,
self._wa,
)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fwpwacdm_inv_efunc
self._inv_efunc_scalar_args = (
self._Om0,
self._Ode0,
self._Ogamma0,
self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list,
self._wp,
apiv,
self._wa,
)
|
b945f2a1fc80a82b3c0df475544a7ed193513033221b2c63f4a3e752cb18b701 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import inspect
from collections import OrderedDict
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.io.mapping import from_mapping, to_mapping
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromMappingTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="mapping"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_mapping_default(self, cosmo, to_format):
"""Test default usage of Cosmology -> mapping."""
m = to_format("mapping")
keys = tuple(m.keys())
assert isinstance(m, dict)
# Check equality of all expected items
assert keys[0] == "cosmology"
assert m.pop("cosmology") is cosmo.__class__
assert keys[1] == "name"
assert m.pop("name") == cosmo.name
for i, k in enumerate(cosmo.__parameters__, start=2):
assert keys[i] == k
assert np.array_equal(m.pop(k), getattr(cosmo, k))
assert keys[-1] == "meta"
assert m.pop("meta") == cosmo.meta
# No unexpected items
assert not m
def test_to_mapping_wrong_cls(self, to_format):
"""Test incorrect argument ``cls`` in ``to_mapping()``."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("mapping", cls=list)
@pytest.mark.parametrize("map_cls", [dict, OrderedDict])
def test_to_mapping_cls(self, to_format, map_cls):
"""Test argument ``cls`` in ``to_mapping()``."""
m = to_format("mapping", cls=map_cls)
assert isinstance(m, map_cls) # test type
def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format):
"""Test argument ``cosmology_as_str`` in ``to_mapping()``."""
default = to_format("mapping")
# Cosmology is the class
m = to_format("mapping", cosmology_as_str=False)
assert inspect.isclass(m["cosmology"])
assert cosmo_cls is m["cosmology"]
assert m == default # False is the default option
# Cosmology is a string
m = to_format("mapping", cosmology_as_str=True)
assert isinstance(m["cosmology"], str)
assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class
assert tuple(m.keys())[0] == "cosmology" # Stayed at same index
def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format):
"""Test roundtrip with ``cosmology_as_str=True``.
The test for the default option (`False`) is in ``test_tofrom_mapping_instance``.
"""
m = to_format("mapping", cosmology_as_str=True)
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
def test_to_mapping_move_from_meta(self, to_format):
"""Test argument ``move_from_meta`` in ``to_mapping()``."""
default = to_format("mapping")
# Metadata is 'separate' from main mapping
m = to_format("mapping", move_from_meta=False)
assert "meta" in m.keys()
assert not any(k in m for k in m["meta"]) # Not added to main
assert m == default # False is the default option
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
assert "meta" not in m.keys()
assert all(k in m for k in default["meta"]) # All added to main
# The parameters take precedence over the metadata
assert all(np.array_equal(v, m[k]) for k, v in default.items() if k != "meta")
def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format):
"""Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``."""
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
# (Just adding something to ensure there's 'metadata')
m["mismatching"] = "will error"
# (Tests are different if the last argument is a **kwarg)
        last_kind = tuple(cosmo._init_signature.parameters.values())[-1].kind
        if last_kind == inspect.Parameter.VAR_KEYWORD:
got = from_format(m, format="mapping")
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# Reading with mismatching parameters errors...
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(m, format="mapping")
# unless mismatched are moved to meta.
got = from_format(m, format="mapping", move_to_meta=True)
assert got == cosmo # (Doesn't check metadata)
assert got.meta["mismatching"] == "will error"
# -----------------------------------------------------
def test_from_not_mapping(self, cosmo, from_format):
"""Test incorrect map type in ``from_mapping()``."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A MAP", format="mapping")
def test_from_mapping_default(self, cosmo, to_format, from_format):
"""Test (cosmology -> Mapping) -> cosmology."""
m = to_format("mapping")
# Read from exactly as given.
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
# Reading auto-identifies 'format'
got = from_format(m)
assert got == cosmo
assert got.meta == cosmo.meta
def test_fromformat_subclass_partial_info_mapping(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
m = cosmo.to_format("mapping")
# partial information
m.pop("cosmology", None)
m.pop("Tcmb0", None)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo.__class__.from_format(m, format="mapping")
got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__)
got3 = Cosmology.from_format(
m, format="mapping", cosmology=cosmo.__class__.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format", [True, False, None, "mapping"])
def test_is_equivalent_to_mapping(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a mapping.
"""
obj = to_format("mapping")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin):
"""Directly test ``to/from_mapping``."""
def setup_class(self):
self.functions = {"to": to_mapping, "from": from_mapping}
@pytest.mark.skip("N/A")
def test_fromformat_subclass_partial_info_mapping(self):
"""This test does not apply to the direct functions."""
|
1fd06ef2bd8cc9220fca76e9661d1fe37d492424e40ebc7c80d9ce7b0330c4f0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.wpwazpcdm`."""
import numpy as np
# THIRD PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import FlatwpwaCDM, wpwaCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .test_base import FlatFLRWMixinTest, FLRWTest
from .test_w0wacdm import ParameterwaTestMixin
##############################################################################
# PARAMETERS
COMOVING_DISTANCE_EXAMPLE_KWARGS = {"wp": -0.9, "zp": 0.5, "wa": 0.1, "Tcmb0": 0.0}
##############################################################################
# TESTS
##############################################################################
class ParameterwpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wp on a Cosmology.
wp is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wp(self, cosmo_cls, cosmo):
"""Test Parameter ``wp``."""
# on the class
assert isinstance(cosmo_cls.wp, Parameter)
assert "at the pivot" in cosmo_cls.wp.__doc__
assert cosmo_cls.wp.unit is None
# on the instance
assert cosmo.wp is cosmo._wp
assert cosmo.wp == self.cls_kwargs["wp"]
def test_init_wp(self, cosmo_cls, ba):
"""Test initialization for values of ``wp``."""
# test that it works with units
ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# also without units
ba.arguments["wp"] = ba.arguments["wp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# must be dimensionless
ba.arguments["wp"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterzpTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` zp on a Cosmology.
zp is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_zp(self, cosmo_cls, cosmo):
"""Test Parameter ``zp``."""
# on the class
assert isinstance(cosmo_cls.zp, Parameter)
assert "pivot redshift" in cosmo_cls.zp.__doc__
assert cosmo_cls.zp.unit == cu.redshift
# on the instance
assert cosmo.zp is cosmo._zp
assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift
def test_init_zp(self, cosmo_cls, ba):
"""Test initialization for values of ``zp``."""
# test that it works with units
ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp == ba.arguments["zp"]
# also without units
ba.arguments["zp"] = ba.arguments["zp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.zp.value == ba.arguments["zp"]
# must be dimensionless
ba.arguments["zp"] = 10 * u.km
with pytest.raises(u.UnitConversionError):
cosmo_cls(*ba.args, **ba.kwargs)
class TestwpwaCDM(
FLRWTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin
):
"""Test :class:`astropy.cosmology.wpwaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wpwaCDM
self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(wp=0.1, wa=0.2, zp=14)
assert c.wp == 0.1
assert c.wa == 0.2
assert c.zp == 14
for n in set(cosmo.__parameters__) - {"wp", "wa", "zp"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.wpwaCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(0.5), -0.9)
assert u.allclose(
cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667],
)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'wpwaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K,"
" Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
# ===============================================================
# Usage Tests
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.3, 0.6),
{},
[2954.68975298, 4599.83254834, 5643.04013201, 6373.36147627] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.5),
{"zp": 0.4, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[2919.00656215, 4558.0218123, 5615.73412391, 6366.10224229] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25, 0.5),
{"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2629.48489827, 3874.13392319, 4614.31562397, 5116.51184842] * u.Mpc,
),
# FLAT: these match the tests in TestFlatwpwaCDM, except Ode0 is set manually.
( # no relativistic species
(75.0, 0.3, 0.7),
{},
[3030.70481348, 4745.82435272, 5828.73710847, 6582.60454542] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.75),
{"zp": 0.4, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[3113.62199365, 4943.28425668, 6114.45491003, 6934.07461377] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25, 0.2458794183661), # to make Ok0 = 0, Otot0 = 1
{"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2517.08634022, 3694.21111754, 4402.17802962, 4886.65787948] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(
cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
)
class TestFlatwpwaCDM(FlatFLRWMixinTest, TestwpwaCDM):
"""Test :class:`astropy.cosmology.FlatwpwaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatwpwaCDM
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'FlatwpwaCDM(name="ABCMeta", H0=70.0 km / (Mpc s),'
" Om0=0.27, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K,"
" Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.3),
{},
[3030.70481348, 4745.82435272, 5828.73710847, 6582.60454542] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25),
{"zp": 0.4, "wa": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0.0 * u.eV},
[3113.62199365, 4943.28425668, 6114.45491003, 6934.07461377] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25),
{"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2517.08634022, 3694.21111754, 4402.17802962, 4886.65787948] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(
cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
)
###############################################################################
# Comparison to Other Codes
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# wpwa models
cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc,
rtol=1e-4,
)
cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc,
rtol=1e-4,
)
##############################################################################
# Miscellaneous
# TODO: these should be better integrated into the new test framework
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert u.allclose(
cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439, 1.324988, 1.565746],
rtol=1e-4,
)
assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert u.allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
# Flat tests
cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)
flatcosmo = FlatwpwaCDM(H0=70, Om0=0.3, wp=-0.9, wa=0.2, zp=0.5)
assert u.allclose(
cosmo.de_density_scale(z), flatcosmo.de_density_scale(z), rtol=1e-7
)
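# A minimal sketch (not part of the original suite) cross-checking
# ``de_density_scale`` against the closed-form scaling it implements,
# I(z) = (1+z)**(3*(1 + wp + apiv*wa)) * exp(-3*wa*z/(1+z)) with
# apiv = 1/(1+zp).
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale_analytic_sketch():
    wp, wa, zp = -0.9, 0.2, 0.5
    cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=wp, wa=wa, zp=zp)
    z = np.array([0.1, 0.5, 1.5, 3.0])
    apiv = 1.0 / (1.0 + zp)
    expected = (1.0 + z) ** (3.0 * (1.0 + wp + apiv * wa)) * np.exp(
        -3.0 * wa * z / (1.0 + z)
    )
    assert u.allclose(cosmo.de_density_scale(z), expected, rtol=1e-7)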
|
df4ff519231b3f74c3e6c1adde9fb87f2d86c2c2c4e053443ad6078d5f59019b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.lambdacdm`."""
##############################################################################
# IMPORTS
# STDLIB
import pathlib
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.constants as const
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM, LambdaCDM
from astropy.cosmology.flrw.lambdacdm import ellipkinc, hyp2f1
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import invalid_zs, valid_zs
from astropy.table import QTable
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
from .test_base import FlatFLRWMixinTest, FLRWTest
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
ellipkinc()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
hyp2f1()
##############################################################################
class TestLambdaCDM(FLRWTest):
"""Test :class:`astropy.cosmology.LambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = LambdaCDM
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(
LambdaCDM, include_private=True, include_z2=False
) - {"_dS_age"}
# `_dS_age` is removed because it doesn't strictly rely on the value of `z`,
# so any input that doesn't trip up ``np.shape`` is "valid"
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.LambdaCDM.w`."""
super().test_w(cosmo, z)
w = cosmo.w(z)
assert u.allclose(w, -1.0)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'LambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)"
)
assert repr(cosmo) == expected
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.25, 0.5),
{"Tcmb0": 0.0},
[2953.93001902, 4616.7134253, 5685.07765971, 6440.80611897] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.6),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)},
[3037.12620424, 4776.86236327, 5889.55164479, 6671.85418235] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.3, 0.4),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)},
[2471.80626824, 3567.1902565, 4207.15995626, 4638.20476018] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected)
# -----------------------------------------------------------------------------
class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM):
"""Test :class:`astropy.cosmology.FlatLambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatLambdaCDM
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", TestLambdaCDM._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ===============================================================
# Method & Attribute Tests
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'FlatLambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s),'
" Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)"
)
assert repr(cosmo) == expected
# ===============================================================
# Usage Tests
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.25),
{"Tcmb0": 0.0},
[3180.83488552, 5060.82054204, 6253.6721173, 7083.5374303] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)},
[3180.42662867, 5059.60529655, 6251.62766102, 7080.71698117] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)},
[2337.54183142, 3371.91131264, 3988.40711188, 4409.09346922] * u.Mpc,
),
( # work the scalar nu density functions
(75.0, 0.25),
{"Tcmb0": 3.0, "m_nu": u.Quantity([10.0, 0, 0], u.eV)},
[2777.71589173, 4186.91111666, 5046.0300719, 5636.10397302] * u.Mpc,
),
( # work the scalar nu density functions
(75.0, 0.25),
{"Tcmb0": 3.0, "m_nu": u.Quantity([10.0, 5, 0], u.eV)},
[2636.48149391, 3913.14102091, 4684.59108974, 5213.07557084] * u.Mpc,
),
( # work the scalar nu density functions
(75.0, 0.25),
{"Tcmb0": 3.0, "m_nu": u.Quantity([4.0, 5, 9], u.eV)},
[2563.5093049, 3776.63362071, 4506.83448243, 5006.50158829] * u.Mpc,
),
( # work the scalar nu density functions
(75.0, 0.25),
{"Tcmb0": 3.0, "Neff": 4.2, "m_nu": u.Quantity([1.0, 4.0, 5, 9], u.eV)},
[2525.58017482, 3706.87633298, 4416.58398847, 4901.96669755] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected)
##############################################################################
# Comparison to Other Codes
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_flat_z1():
"""Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html
"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
# The order of values below is Wright, Kempner, iCosmos'
assert u.allclose(
cosmo.comoving_distance(1), [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4
)
assert u.allclose(
cosmo.angular_diameter_distance(1),
[1682.3, 1682.4, 1682.3994] * u.Mpc,
rtol=1e-4,
)
assert u.allclose(
cosmo.luminosity_distance(1), [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4
)
assert u.allclose(
cosmo.lookback_time(1), [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3
)
assert u.allclose(
cosmo.lookback_distance(1), [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3
)
##############################################################################
# Regression Tests
SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [
FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter
FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter
FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric
LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic
]
ITERABLE_REDSHIFTS = [
(0, 1, 2, 3, 4), # tuple
[0, 1, 2, 3, 4], # list
np.array([0, 1, 2, 3, 4]), # array
]
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
@pytest.mark.parametrize("z", ITERABLE_REDSHIFTS)
def test_comoving_distance_iterable_argument(cosmo, z):
"""
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments.
"""
assert u.allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
def test_comoving_distance_broadcast(cosmo):
"""
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments.
"""
z1 = np.zeros((2, 5))
z2 = np.ones((3, 1, 5))
z3 = np.ones((7, 5))
output_shape = np.broadcast(z1, z2).shape
# Check compatible array arguments return an array with the correct shape
assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape
# Check incompatible array arguments raise an error
with pytest.raises(ValueError, match="z1 and z2 have different shapes"):
cosmo._comoving_distance_z1z2(z1, z3)
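# A minimal sketch (not in the original suite): in the de Sitter limit
# (flat, Om0=0, no radiation) E(z) = 1, so the comoving distance reduces to
# the analytic form D_C = (c / H0) * z, which the specialized code path
# above should reproduce.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_sitter_comoving_distance_analytic_sketch():
    cosmo = FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0)
    z = np.array([0.5, 1.0, 2.0, 5.0])
    hubble_distance = (299792.458 / 70.0) * u.Mpc  # c / H0
    assert u.allclose(cosmo.comoving_distance(z), hubble_distance * z, rtol=1e-7)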
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = LambdaCDM(70.0, 2.3, 0.05, Tcmb0=0)
z = 0.2
assert u.allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
)
assert u.allclose(
cosmo._elliptic_comoving_distance_z1z2(0.0, z),
cosmo._integral_comoving_distance_z1z2(0.0, z),
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
# Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc,
rtol=5e-4,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc,
rtol=5e-4,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc,
rtol=5e-4,
)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc,
rtol=1e-5,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc,
rtol=1e-5,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc,
rtol=1e-5,
)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0**3 * 2.725**4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert u.allclose(cosmo.comoving_distance(z.astype(int)), targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize(
"file_name", ["cosmo_flat.ecsv", "cosmo_open.ecsv", "cosmo_closed.ecsv"]
)
def test_flat_open_closed_icosmo(file_name):
"""Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
with u.add_enabled_units(cu):
tbl = QTable.read(pathlib.Path(__file__).parent / "data" / file_name)
cosmo = LambdaCDM(
H0=100 * tbl.meta["h"], Om0=tbl.meta["Om"], Ode0=tbl.meta["Ol"], Tcmb0=0.0
)
assert u.allclose(cosmo.comoving_transverse_distance(tbl["redshift"]), tbl["dm"])
assert u.allclose(cosmo.angular_diameter_distance(tbl["redshift"]), tbl["da"])
assert u.allclose(cosmo.luminosity_distance(tbl["redshift"]), tbl["dl"])
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_transverse_distance_z1z2():
tcos = FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert u.allclose(
tcos._comoving_transverse_distance_z1z2(1, 2), 1313.2232194828466 * u.Mpc
)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert u.allclose(
tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2),
)
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (
2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258,
) * u.Mpc
assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert u.allclose(
tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2),
)
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (
3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884,
) * u.Mpc
assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
# Test positive curvature with scalar, array combination.
tcos = LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (
-281.31602666724865,
0.0,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927,
) * u.Mpc
assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_angular_diameter_distance_z1z2():
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert u.allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 646.22968662822018 * u.Mpc
)
z1 = 2 # Separate test for z2<z1, returns negative value with warning
z2 = 1
results = -969.34452994 * u.Mpc
with pytest.warns(AstropyUserWarning, match="less than first redshift"):
assert u.allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0, 0, 0.5, 1
z2 = 2, 1, 2.5, 1.1
results = (
1760.0628637762106,
1670.7497657219858,
1159.0970895962193,
115.72768186186921,
) * u.Mpc
assert u.allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976) * u.Mpc
    assert u.allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
# Non-flat (positive Ok0) test
tcos = LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert u.allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 620.1175337852428 * u.Mpc
)
# Non-flat (negative Ok0) test
tcos = LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert u.allclose(
tcos.angular_diameter_distance_z1z2(1, 2), 228.42914659246014 * u.Mpc
)
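# Background note (illustrative): angular_diameter_distance_z1z2 returns
# D_M(z1, z2) / (1 + z2), where D_M(z1, z2) is the comoving transverse
# distance between the two redshifts (in a flat universe simply
# D_C(z2) - D_C(z1)).  Since that quantity changes sign when the redshifts
# are swapped, z2 < z1 gives the negative value (with a warning) checked
# above.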
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = (
nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, 15633.5, 171.801])
)
assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert u.allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.25, u.eV))
nurel_exp = (
nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, 39.1005, 1.11086])
)
assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, 0.06999286, 0.1344951])
assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.01, u.eV))
nurel_exp = (
nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, 1.90671, 1.00021])
)
assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, 0.00268404, 0.0978313])
assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert u.allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], rtol=1e-4)
assert u.allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = FlatLambdaCDM(
80.0, 0.30, Tcmb0=3.0, Neff=3.04, m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV)
)
nurel_exp = (
nuprefac
* tcos.Neff
* np.array([149.386233, 74.87915, 50.0518, 14.002403, 1.03702333])
)
assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, 0.01963451, 0.10227728])
assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
##############################################################################
# Miscellaneous
# TODO: these should be better integrated into the new test framework
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_units():
"""Test if the right units are being returned"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm**3
assert cosmo.comoving_volume(1.0).unit == u.Mpc**3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
def test_xtfuncs():
"""Test of absorption and lookback integrand"""
cosmo = LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert u.allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, rtol=1e-4)
assert u.allclose(
cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541], rtol=1e-4
)
assert u.allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, rtol=1e-4)
assert u.allclose(
cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758], rtol=1e-4
)
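# Background note (illustrative): the two integrands checked above are
#     lookback_time_integrand(z) = 1 / ((1 + z) * E(z))
#     abs_distance_integrand(z)  = (1 + z)**2 / E(z)
# i.e. the kernels of the lookback-time and absorption-distance integrals, so
# the values at z = 3 are just 1 / (4 E(3)) and 16 / E(3) for this cosmology.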
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_matter():
# Test non-relativistic matter evolution
tcos = FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert u.allclose(tcos.Om(0), 0.3)
assert u.allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert u.allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455], rtol=1e-4)
assert u.allclose(
tcos.Ob(z), [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4
)
assert u.allclose(
tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636], rtol=1e-4
)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert u.allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
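# Illustrative sketch (an added consistency check, not an external reference):
# for any FLRW cosmology the matter density parameter evolves as
# Om(z) = Om0 * (1 + z)**3 / E(z)**2, which is what the tabulated values in
# test_matter encode.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_matter_density_evolution_sketch():
    tcos = FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert u.allclose(tcos.Om(z), tcos.Om0 * (1.0 + z) ** 3 * tcos.inv_efunc(z) ** 2)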
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = FlatLambdaCDM(70.0, 0.3)
assert u.allclose(tcos.Ok0, 0.0)
assert u.allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert u.allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0], rtol=1e-6)
# Not flat
tcos = LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert u.allclose(tcos.Ok0, 0.2)
assert u.allclose(tcos.Ok(0), 0.2)
assert u.allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692], rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert u.allclose(
tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z), [1.0, 1.0, 1.0, 1.0], rtol=1e-5
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert u.allclose(tcos.Ode0, 0.7)
assert u.allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert u.allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545], rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tcmb():
cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert u.allclose(cosmo.Tcmb0, 2.5 * u.K)
assert u.allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert u.allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert u.allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tnu():
cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert u.allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert u.allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert u.allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert u.allclose(cosmo.Tnu(z), expected, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_kpc_methods():
cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert u.allclose(cosmo.arcsec_per_kpc_comoving(3), 0.0317179167 * u.arcsec / u.kpc)
assert u.allclose(cosmo.arcsec_per_kpc_proper(3), 0.1268716668 * u.arcsec / u.kpc)
assert u.allclose(cosmo.kpc_comoving_per_arcmin(3), 1891.6753126 * u.kpc / u.arcmin)
assert u.allclose(cosmo.kpc_proper_per_arcmin(3), 472.918828 * u.kpc / u.arcmin)
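# Background note (illustrative): all four quantities above derive from the
# angular diameter distance D_A(z): kpc_proper_per_arcmin is D_A expressed in
# kpc per arcminute, arcsec_per_kpc_proper is its reciprocal in arcsec per
# kpc, and the "comoving" variants differ from the "proper" ones by a factor
# of (1 + z) -- the factor of 4 at z = 3 is visible directly in the numbers.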
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_volume():
c_flat = LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test against ned wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = (
np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3
)
wright_open = (
np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3
)
wright_closed = (
np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3
)
# The wright calculator isn't very accurate, so we use a rather
# modest precision
assert u.allclose(c_flat.comoving_volume(redshifts), wright_flat, rtol=1e-2)
assert u.allclose(c_open.comoving_volume(redshifts), wright_open, rtol=1e-2)
assert u.allclose(c_closed.comoving_volume(redshifts), wright_closed, rtol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = (
np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3
)
wright_open = (
np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3
)
wright_closed = (
np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3
)
# The wright calculator isn't very accurate, so we use a rather
# modest precision.
def ftemp(x):
return c_flat.differential_comoving_volume(x).value
def otemp(x):
return c_open.differential_comoving_volume(x).value
def ctemp(x):
return c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert u.allclose(
np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_flat,
rtol=1e-2,
)
assert u.allclose(
np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_open,
rtol=1e-2,
)
assert u.allclose(
np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] for redshift in redshifts])
* u.Mpc**3,
wright_closed,
rtol=1e-2,
)
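# Background note (illustrative): differential_comoving_volume returns
# dV_C / (dz dOmega) = D_H * (1 + z)**2 * D_A(z)**2 / E(z), so integrating it
# in redshift and multiplying by the full solid angle 4 * pi, as done above,
# recovers comoving_volume to within the quadrature accuracy.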
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert u.allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert u.allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert u.allclose(tcos.age([1.0, 5.0]), [5.97113193, 1.20553129] * u.Gyr)
assert u.allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert u.allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert u.allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0, m_nu=0.1 * u.eV)
assert u.allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert u.allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert u.allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert u.allclose(tcos.distmod([1, 5]), [44.124857, 48.40167258] * u.mag)
assert u.allclose(tcos.distmod([1.0, 5.0]), [44.124857, 48.40167258] * u.mag)
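# Illustrative sketch (an added consistency check, not an external reference):
# the distance modulus is by definition 5 log10(d_L / 10 pc).
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distmod_definition_sketch():
    tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    z = np.array([1.0, 5.0])
    d_l = tcos.luminosity_distance(z)
    assert u.allclose(
        tcos.distmod(z), 5.0 * np.log10(d_l.to_value(u.pc) / 10.0) * u.mag
    )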
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert u.allclose(
tcos.luminosity_distance([50, 100]), [16612.44047622, -46890.79092244] * u.Mpc
)
assert u.allclose(tcos.distmod([50, 100]), [46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_critical_density():
from astropy.constants import codata2014
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py.
# critical_density0 is inversely proportional to G.
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value
assert u.allclose(
tcos.critical_density0 * fac, 9.309668456020899e-30 * (u.g / u.cm**3)
)
assert u.allclose(tcos.critical_density0, tcos.critical_density(0))
assert u.allclose(
tcos.critical_density([1, 5]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3),
)
assert u.allclose(
tcos.critical_density([1.0, 5.0]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3),
)
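# Illustrative sketch (an added consistency check, not an external reference):
# the critical density at any redshift is 3 H(z)^2 / (8 pi G).
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_critical_density_definition_sketch():
    tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    z = np.array([0.0, 1.0, 5.0])
    rho_c = (3.0 * tcos.H(z) ** 2 / (8.0 * np.pi * const.G)).to(u.g / u.cm**3)
    assert u.allclose(tcos.critical_density(z), rho_c)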
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_integral():
# Test integer vs. floating point inputs
cosmo = LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert u.allclose(
cosmo.comoving_distance(3), cosmo.comoving_distance(3.0), rtol=1e-7
)
assert u.allclose(
cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7,
)
assert u.allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert u.allclose(cosmo.efunc([1, 2, 6]), cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert u.allclose(
cosmo.inv_efunc([1, 2, 6]), cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert u.allclose(cosmo.de_density_scale(z), [1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert u.allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_distance_z1z2():
tcos = LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert u.allclose(
tcos._comoving_distance_z1z2(1, 2), -tcos._comoving_distance_z1z2(2, 1)
)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (
3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683,
) * u.Mpc
assert u.allclose(tcos._comoving_distance_z1z2(z1, z2), results)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = FlatLambdaCDM(100, 0, Tcmb0=0)
assert u.allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert u.allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert u.allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert u.allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0)
assert u.allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert u.allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert u.allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert u.allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = FlatLambdaCDM(100, 0, Tcmb0=0)
assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0)
assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = LambdaCDM(100, 0, 1, Tcmb0=0)
assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = LambdaCDM(100, 1, 0, Tcmb0=0)
assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
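# Background note (illustrative): the values above follow from the closed
# forms for these limiting cosmologies.  With H0 = 100 km/s/Mpc the Hubble
# distance is D_H = c / H0 = 2997.92458 Mpc, and
#     de Sitter:          D_C(z) = D_H * z
#     Einstein-de Sitter: D_C(z) = 2 * D_H * (1 - 1 / sqrt(1 + z))
# giving 2997.92458 Mpc and ~1756.14 Mpc at z = 1, respectively.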
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_absorption_distance():
tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert u.allclose(tcos.absorption_distance([1, 3]), [1.72576635, 7.98685853])
assert u.allclose(tcos.absorption_distance([1.0, 3.0]), [1.72576635, 7.98685853])
assert u.allclose(tcos.absorption_distance(3), 7.98685853)
assert u.allclose(tcos.absorption_distance(3.0), 7.98685853)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_broadcast():
"""Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = [
"comoving_distance",
"luminosity_distance",
"comoving_transverse_distance",
"angular_diameter_distance",
"distmod",
"lookback_time",
"age",
"comoving_volume",
"differential_comoving_volume",
"kpc_comoving_per_arcmin",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = [
"absorption_distance",
"Om",
"Ode",
"Ok",
"H",
"w",
"de_density_scale",
"Onu",
"Ogamma",
"nu_relative_density",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten())
|
ff2366a66e0c08885442715e2f7efe4303f04f5d9676c9259c1b903204ab4bab | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.w0wzcdm`."""
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Flatw0wzCDM, w0wzCDM
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.test_core import ParameterTestMixin
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .test_base import FlatFLRWMixinTest, FLRWTest
from .test_w0cdm import Parameterw0TestMixin
##############################################################################
# PARAMETERS
COMOVING_DISTANCE_EXAMPLE_KWARGS = {"w0": -0.9, "wz": 0.1, "Tcmb0": 0.0}
##############################################################################
# TESTS
##############################################################################
class ParameterwzTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wz on a Cosmology.
wz is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wz(self, cosmo_cls, cosmo):
"""Test Parameter ``wz``."""
# on the class
assert isinstance(cosmo_cls.wz, Parameter)
assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__
assert cosmo_cls.wz.unit is None
# on the instance
assert cosmo.wz is cosmo._wz
assert cosmo.wz == self.cls_kwargs["wz"]
def test_init_wz(self, cosmo_cls, ba):
"""Test initialization for values of ``wz``."""
# test that it works with units
ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# also without units
ba.arguments["wz"] = ba.arguments["wz"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wz == ba.arguments["wz"]
# must be dimensionless
ba.arguments["wz"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
class Testw0wzCDM(FLRWTest, Parameterw0TestMixin, ParameterwzTestMixin):
"""Test :class:`astropy.cosmology.w0wzCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = w0wzCDM
self.cls_kwargs.update(w0=-1, wz=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(w0=0.1, wz=0.2)
assert c.w0 == 0.1
assert c.wz == 0.2
for n in set(cosmo.__parameters__) - {"w0", "wz"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.w0wzCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(1.0), -0.5)
assert u.allclose(
cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1.0, -0.75, -0.5, -0.25, 0.15]
)
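    # Note (illustrative): w0wzCDM parametrizes the dark-energy equation of
    # state linearly in redshift, w(z) = w0 + wz * z, so with the fixture
    # values w0 = -1 and wz = 0.5 the expectations above are just -1 + 0.5 z.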
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'w0wzCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,'
" Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04,"
" m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
# ===============================================================
# Usage Tests
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.3, 0.6),
{},
[3051.68786716, 4756.17714818, 5822.38084257, 6562.70873734] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.5),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[2997.8115653, 4686.45599916, 5764.54388557, 6524.17408738] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25, 0.5),
{"Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2676.73467639, 3940.57967585, 4686.90810278, 5191.54178243] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(
cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
)
class TestFlatw0wzCDM(FlatFLRWMixinTest, Testw0wzCDM):
"""Test :class:`astropy.cosmology.Flatw0wzCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = Flatw0wzCDM
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = (
'Flatw0wzCDM(name="ABCMeta", H0=70.0 km / (Mpc s),'
" Om0=0.27, w0=-1.0, wz=0.5, Tcmb0=3.0 K,"
" Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)"
)
assert repr(cosmo) == expected
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.3),
{},
[3156.41804372, 4951.19475878, 6064.40591021, 6831.18710042] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25),
{"Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[3268.38450997, 5205.96494068, 6419.75447923, 7257.77819438] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25),
{"Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2536.77159626, 3721.76294016, 4432.3526772, 4917.90352107] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(
cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
)
##############################################################################
# Miscellaneous
# TODO: these should be better integrated into the new test framework
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert u.allclose(
cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129, 0.0035916468],
rtol=1e-4,
)
assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
assert u.allclose(
cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1.0, 2.0, 3.0]),
rtol=1e-7,
)
# Flat tests
cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-1, wz=0.5)
flatcosmo = Flatw0wzCDM(H0=70, Om0=0.3, w0=-1, wz=0.5)
assert u.allclose(
cosmo.de_density_scale(z), flatcosmo.de_density_scale(z), rtol=1e-4
)
|
c2973540791c1f71f15ab3125cda20c28739853d8b48e7fd9bbea024b6dee069 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
        CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
        \pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
    The Wald interval gives acceptable results in some limiting cases,
    particularly when n is very large and the true proportion
    :math:`\epsilon` is not "too close" to 0 or 1. However, since the
    latter condition cannot be verified when trying to estimate
    :math:`\epsilon`, this is not very helpful. Its use is not
    recommended, but it is provided here for comparison purposes due to
    its prevalence in everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
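# Worked example (illustrative note, not part of the public API): for the
# default confidence_level = 0.68269 the code above gives
# kappa = sqrt(2) * erfinv(0.68269) ~= 1.0, i.e. the familiar 1-sigma z-score.
# The Wald branch is then simple arithmetic; for k = 2, n = 5:
#     p = 2 / 5 = 0.4
#     halflength = 1.0 * sqrt(0.4 * 0.6 / 5) ~= 0.2191
#     interval ~= (0.1809, 0.6191)
# which matches the k = 2 column of the 'wald' example in the docstring.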
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bins_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
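# Worked example (illustrative note): the 'pearson' branch above is plain
# arithmetic and can be checked by hand.  For n = 4:
#     lower = 4 + 0.5 - sqrt(4.25) ~= 2.43845
#     upper = 4 + 0.5 + sqrt(4.25) ~= 6.56155
# which reproduces the n = 4 row of the 'pearson' table in the docstring.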
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
        # returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and data_median.ndim == 0
and np.isnan(data_median)
):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and result.ndim == 0
and np.isnan(result)
):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
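# Worked example (illustrative note): the MAD is robust to outliers because it
# is built from medians only.  For data = [1, 2, 3, 4, 100]:
#     median(data)    = 3
#     |data - median| = [2, 1, 0, 1, 97]
#     MAD             = median([2, 1, 0, 1, 97]) = 1
# A single wild point (100) leaves the MAD unchanged, whereas the standard
# deviation would be dominated by it.  ``mad_std`` below rescales this value
# by ~1.4826 so that it estimates the Gaussian standard deviation.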
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
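# Worked example for the 1.4826 scaling (added for exposition, in comments):
# for [1, 2, 3, 4, 5] the median is 3, the absolute deviations are
# [2, 1, 0, 1, 2], so MAD = 1 and mad_std = 1 / Phi^{-1}(3/4):
#
#     >>> print(mad_std([1., 2., 3., 4., 5.]))  # doctest: +FLOAT_CMP
#     1.482602218505602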
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
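# Illustrative usage sketch (added for exposition, in comments; the numbers
# are made up): a source giving 10 e-/s in a 9-pixel aperture, with
# 1 e-/s/pix sky, 0.1 e-/s/pix dark current and 5 e- read noise, observed
# for 100 s:
#
#     >>> snr = signal_to_noise_oir_ccd(t=100, source_eps=10, sky_eps=1,
#     ...                               dark_eps=0.1, rd=5, npix=9)
#     >>> # signal = 1000 e-, noise = sqrt(1990 + 225) ~ 47.1, so snr ~ 21.2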
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min that satisfies eqn7(S_min) =
eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float because, for some reason,
# mpmath.mpf cannot convert from numpy.int64
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min that satisfies eqn7(S_min) =
eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" value, is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found to prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus we use this primitive, time-wasting, brute-force stepping to get
# an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either :mod:`scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100).
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath are required.")
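# Illustrative usage sketch (added for exposition, in comments). These are
# private helpers; the public entry point in `astropy.stats` is
# ``poisson_conf_interval`` with ``interval='kraft-burrows-nousek'``, which
# dispatches to ``_kraft_burrows_nousek``:
#
#     >>> poisson_conf_interval(5, interval='kraft-burrows-nousek',
#     ...                       background=2.5, confidence_level=0.95)
#     >>> # -> array([S_min, S_max]), the limits on the source counts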
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.420, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
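# Illustrative usage sketch (added for exposition, in comments). Against the
# default uniform CDF, a uniform sample should give a non-significant fpp,
# while a clearly non-uniform sample should give a tiny one:
#
#     >>> import numpy as np
#     >>> rng = np.random.default_rng(12345)
#     >>> D_u, fpp_u = kuiper(rng.uniform(size=1000))          # fpp_u typically large
#     >>> D_b, fpp_b = kuiper(rng.beta(2.0, 5.0, size=1000))   # fpp_b essentially 0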
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the weight of the subinterval (breaks[i], breaks[i+1]):
the sum, over all input intervals, of the weight of each input interval
times the number of times it covers that subinterval.
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
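# Worked example (added for exposition, in comments). A single observing
# block covering phases 0.25..1.25 wraps exactly once around the cycle, so
# every part of (0, 1) is covered once:
#
#     >>> breaks, weights = fold_intervals([(0.25, 1.25, 1.0)])
#     >>> # breaks  -> array([0.  , 0.25, 1.  ])
#     >>> # weights -> array([1., 1.])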
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by ``breaks`` and ``totals``.
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
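# Worked example continuing the ``fold_intervals`` sketch above (exposition
# only). Uniform exposure gives the identity CDF on (0, 1):
#
#     >>> cdf = cdf_from_intervals(np.array([0., 0.25, 1.]), np.array([1., 1.]))
#     >>> print(cdf(0.5))
#     0.5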
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
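# Quick worked example (exposition only): (0, 0.5) and (0.25, 1.0) overlap on
# (0.25, 0.5), which has length 0.25:
#
#     >>> interval_overlap_length((0.0, 0.5), (0.25, 1.0))
#     0.25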
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import sys
import types
import warnings
import weakref
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.units import Quantity, QuantityInfo
from astropy.utils import ShapedLikeNDArray, isiterable
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaAttribute, MetaData
from . import conf, groups
from .column import (
BaseColumn,
Column,
FalseArray,
MaskedColumn,
_auto_names,
_convert_sequence_data_to_array,
col_copy,
)
from .connect import TableRead, TableWrite
from .index import (
Index,
SlicedIndex,
TableILoc,
TableIndices,
TableLoc,
TableLocIndices,
_IndexModeContext,
get_index,
)
from .info import TableInfo
from .mixins.registry import get_mixin_handler
from .ndarray_mixin import NdarrayMixin # noqa: F401
from .pprint import TableFormatter
from .row import Row
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = [
"Table.read",
"Table.write",
"Table._read",
"Table.convert_bytestring_to_unicode",
"Table.convert_unicode_to_bytestring",
]
__doctest_requires__ = {"*pandas": ["pandas>=1.1"]}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = "O" if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, "shape") else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, "info", None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
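# Illustrative sketch (added for exposition, in comments). Because the names
# are accumulated in a set, the returned order is not guaranteed:
#
#     >>> _get_names_from_list_of_dict([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
#     >>> # -> ['a', 'b'] in some order; returns None for e.g. [[1, 2], [3, 4]]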
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError(
f"Illegal key or index value for {type(self).__name__} object"
)
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError(
f"Cannot replace column '{item}'. Use Table.replace_column() instead."
)
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, "_instance_ref"):
out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist."""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and "__attributes__" not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f"{name} not in {self.name}")
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list."""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
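# Illustrative usage sketch (added for exposition, in comments). This
# descriptor backs ``Table.pprint_include_names`` and
# ``Table.pprint_exclude_names``; typical use looks like:
#
#     >>> t = Table({'a': [1], 'b': [2], 'c': [3]})
#     >>> t.pprint_include_names = ('a', 'c')   # only 'a' and 'c' are printed
#     >>> t.pprint_include_names.add('b')       # now all three again
#     >>> with t.pprint_include_names.set(['a']):
#     ...     t.pprint()                        # temporarily show only 'a'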
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder("=")
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked output, masked mixin columns need to set the output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"):
data[col.info.name].mask = col.mask
return data
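# Illustrative usage sketch (added for exposition, in comments):
#
#     >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
#     >>> t.as_array(names=['a']).dtype.names   # structured array limited to 'a'
#     ('a',)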
def __init__(
self,
data=None,
masked=False,
names=None,
dtype=None,
meta=None,
copy=True,
rows=None,
copy_indices=True,
units=None,
descriptions=None,
**kwargs,
):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype is changing
if not copy and dtype is not None:
raise ValueError("Cannot specify dtype when copy=False")
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError("Cannot supply both `data` and `rows` values")
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, "__astropy_table__"):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError(
f"__init__() got unexpected keyword argument {list(kwargs.keys())[0]!r}"
)
if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names:
data = None
if isinstance(data, self.Row):
data = data._table[data._index : data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (
names_from_list_of_dict or _get_names_from_list_of_dict(data)
)
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError("Can not initialize a Table with a scalar")
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError(
"dtype was specified but could not be "
"parsed for column names"
)
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f"Data type {type(data)} not allowed to init Table")
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute("unit", units)
self._set_column_attribute("description", descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a Mapping, assume an iterable and convert to a dict if it has the right length
if len(values) != len(self.columns):
raise ValueError(
f"sequence of {attr} values must match number of columns"
)
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(
f"invalid column name {name} for setting {attr} attribute"
)
# Special case: ignore unit if it is an empty or blank string
if attr == "unit" and isinstance(value, str):
if value.strip() == "":
value = None
if value not in (np.ma.masked, None):
col = self[name]
if attr == "unit" and isinstance(col, Quantity):
# Update the Quantity unit in-place
col <<= value
else:
setattr(col.info, attr, value)
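# Illustrative sketch (added for exposition, in comments). This helper is what
# makes the ``units`` and ``descriptions`` init kwargs work:
#
#     >>> t = Table([[1.0, 2.0]], names=['a'], units={'a': 'm'},
#     ...           descriptions={'a': 'a length'})
#     >>> # t['a'].unit -> Unit("m"); t['a'].info.description -> 'a length'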
def __getstate__(self):
columns = OrderedDict(
(key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items()
)
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table(
[
getattr(col, "mask", FalseArray(col.shape))
for col in self.itercols()
],
names=self.colnames,
copy=False,
)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property.
"""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [
col.filled(fill_value) if hasattr(col, "filled") else col
for col in self.itercols()
]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
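# Illustrative usage sketch (added for exposition, in comments):
#
#     >>> t = Table({'a': [1, 2, 3]}, masked=True)
#     >>> t['a'].mask = [False, True, False]
#     >>> t.filled(-99)['a'].tolist()
#     [1, -99, 3]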
@property
def indices(self):
"""
Return the indices associated with columns of the table
as a TableIndices object.
"""
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
"""
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
"""
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
"""
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
"""
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
'Cannot create an index on column "{}", of type "{}"'.format(
col.info.name, type(col)
)
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
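# Illustrative usage sketch (added for exposition, in comments). After adding
# an index, rows can be looked up by key value through ``Table.loc``:
#
#     >>> t = Table({'name': ['x', 'y', 'z'], 'flux': [1.0, 2.0, 3.0]})
#     >>> t.add_index('name')
#     >>> t.loc['y']['flux']   # -> 2.0 (a single matching row is returned)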
def remove_indices(self, colname):
"""
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
"""
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
"""
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
"""
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
if np.dtype(dtype) != object:
raise ValueError("Datatype coercion is not allowed")
out = np.array(None, dtype=object)
out[()] = self
return out
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, "dtype"), (names, "names")):
if not isiterable(inp_list):
raise ValueError(f"{inp_str} must be a list or None")
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
# ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
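# Illustrative sketch (added for exposition, in comments). Keys missing from
# some of the input dicts end up as masked entries:
#
#     >>> t = Table(rows=[{'a': 1, 'b': 2}, {'a': 3}])
#     >>> t['b'].mask.tolist()
#     [False, True]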
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(
self, data, copy=True, default_name=None, dtype=None, name=None
):
"""
Convert any allowed sequence data ``data`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
name or data.info.name or default_name
If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (
original_data.__class__.__module__
+ "."
+ original_data.__class__.__name__
)
raise TypeError(
"Mixin handler for object of type "
f"{fully_qualified_name} "
"did not return a valid mixin column"
)
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif "info" in getattr(data, "__dict__", ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute. If not copying, take a slice
# to ensure we get a new instance and we do not share metadata
# like info.
col = col_copy(data, copy_indices=self._init_indices) if copy else data[:]
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more likely
# it is just MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, "dtype"):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = (
masked_col_cls
if isinstance(data, np.ma.MaskedArray)
else self.ColumnClass
)
else:
col_cls = self.ColumnClass
try:
col = col_cls(
name=name,
data=data,
dtype=dtype,
copy=copy,
copy_indices=self._init_indices,
)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError("unable to convert data to Column for Table")
col = self._convert_col_for_table(col)
return col
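# Illustrative sketch (comment only, not executed) of the name precedence
# handled above; ``c`` and ``t`` are hypothetical:
#
#     c = Column([1, 2], name='x')
#     t = Table()
#     t['y'] = c             # explicit key wins -> column named 'y'
#     t.add_column(c)        # falls back to c.info.name -> 'x'
#     t.add_column([3, 4])   # no name available -> default name 'col2'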
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array."""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = (
[data[name] for name in data_names]
if struct
else [data[:, i] for i in range(n_cols)]
)
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns."""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
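# Illustrative sketch (comment only, not executed): in a masked table a plain
# Column is upgraded as described above; ``t`` is hypothetical:
#
#     t = Table(masked=True)
#     t['a'] = Column([1, 2])
#     type(t['a'])           # -> MaskedColumn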
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects."""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f"Inconsistent data column lengths: {lengths}")
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(
table, newcols, verify=False, names=self.columns.keys()
)
return table
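# Illustrative sketch (comment only, not executed): a slice shares column data
# with the parent table while meta is shallow-copied; ``t`` is hypothetical:
#
#     t = Table({'a': [1, 2, 3]})
#     s = t[1:]              # referenced slice via _new_from_slice
#     s['a'][0] = 99
#     t['a'][1]              # -> 99 (data is shared)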
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
# case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs, and verify=False is used.
if verify:
if None in names:
raise TypeError("Cannot have None for column name")
if len(set(names)) != len(names):
raise ValueError("Duplicate column names")
table.columns = table.TableColumns(
(name, col) for name, col in zip(names, cols)
)
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, "mask"):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(
self,
html=False,
descr_vals=None,
max_width=None,
tableid=None,
show_dtype=True,
max_lines=None,
tableclass=None,
):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(self)}")
descr = " ".join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f"<i>{xml_escape(descr)}</i>\n"
else:
descr = f"<{descr}>\n"
if tableid is None:
tableid = f"table{id(self)}"
data_lines, outs = self.formatter._pformat_table(
self,
tableid=tableid,
html=html,
max_width=max_width,
show_name=True,
show_unit=None,
show_dtype=show_dtype,
max_lines=max_lines,
tableclass=tableclass,
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(
html=True, max_width=-1, tableclass=conf.default_notebook_table_class
)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f"<div>{out}</div>"
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return "\n".join(self.pformat())
def __bytes__(self):
return str(self).encode("utf-8")
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values; use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
return any(hasattr(col, "mask") and np.any(col.mask) for col in self.itercols())
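# Illustrative sketch (comment only, not executed) of the two properties above;
# ``t`` is hypothetical:
#
#     t = Table({'a': [1, 2]}, masked=True)
#     t.has_masked_columns   # -> True  (columns are MaskedColumn)
#     t.has_masked_values    # -> False (no element is actually masked)
#     t['a'][0] = np.ma.masked
#     t.has_masked_values    # -> True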
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
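# Illustrative sketch (comment only, not executed): limit printed output and
# include the dtype row; ``t`` is hypothetical:
#
#     t = Table({'a': np.arange(100)})
#     t.pprint(max_lines=10, show_dtype=True)
#     t.pprint(max_lines=-1)     # no line limit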
@format_doc(_pprint_docs)
def pprint_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(
max_lines, max_width, show_name, show_unit, show_dtype, align
)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()), copy=False)
else:
return self
def show_in_notebook(
self,
tableid=None,
css=None,
display_length=50,
table_class="astropy-default",
show_row_index="idx",
):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from IPython.display import HTML
from .jsviewer import JSViewer
if tableid is None:
tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}"
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == "astropy-default":
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(
html=True,
max_width=-1,
tableid=tableid,
max_lines=-1,
show_dtype=False,
tableclass=table_class,
)
columns = display_table.columns.values()
sortable_columns = [
i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc"
]
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(
self,
max_lines=5000,
jsviewer=False,
browser="default",
jskwargs={"use_local_files": True},
tableid=None,
table_class="display compact",
css=None,
show_row_index="idx",
):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import tempfile
import webbrowser
from urllib.parse import urljoin
from urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "table.html")
with open(path, "w") as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(
tmp,
format="jsviewer",
css=css,
max_lines=max_lines,
jskwargs=jskwargs,
table_id=tableid,
table_class=table_class,
)
else:
self.write(tmp, format="html")
try:
br = webbrowser.get(None if browser == "default" else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin("file:", pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
tableid=tableid,
tableclass=tableclass,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
return lines
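# Illustrative sketch (comment only, not executed): get the formatted lines
# without printing them; ``t`` is hypothetical:
#
#     t = Table({'a': [1, 2]})
#     lines = t.pformat(max_width=-1)
#     text = '\n'.join(lines)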
@format_doc(_pformat_docs, id="{id}")
def pformat_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(
max_lines,
max_width,
show_name,
show_unit,
show_dtype,
html,
tableid,
align,
tableclass,
)
def more(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__(
[self[x] for x in item], copy_indices=self._copy_indices
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif (isinstance(item, np.ndarray) and item.size == 0) or (
isinstance(item, (tuple, list)) and not item
):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (
not getattr(self, "_setitem_inplace", False)
and not conf.replace_inplace
):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError(
"Right side value needs {} elements (one for each column)".format(
n_cols
)
)
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif isinstance(item, (list, tuple, np.ndarray)) and all(
isinstance(x, str) for x in item
):
self.remove_columns(item)
elif (
isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i"
):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError("illegal key or index value")
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception(
"Masked attribute is read-only (use t = Table(t, masked=True)"
" to convert to a masked table)"
)
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings."""
return (
isinstance(names, (tuple, list))
and names
and all(isinstance(x, str) for x in names)
)
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(
self,
col,
index=None,
name=None,
rename_duplicate=False,
copy=True,
default_name=None,
):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f"col{len(self.columns)}"
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(
col, name=name, copy=copy, default_name=default_name
)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError("Empty table cannot have column set to scalar value")
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, "shape", ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape, subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape, subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError("Inconsistent data column lengths")
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + "_" + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(
self, cols, indexes=None, names=None, copy=True, rename_duplicate=False
):
"""
Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError("Number of indexes must match number of cols")
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError("Number of names must match number of cols")
default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes, kind="stable")):
self.add_column(
cols[ii],
index=indexes[ii],
name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate,
copy=copy,
)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
# sys.getrefcount is CPython specific and not on PyPy.
if (
"refcount" in warns
and name in self.colnames
and hasattr(sys, "getrefcount")
):
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if "always" in warns:
warnings.warn(
f"replaced column '{name}'", TableReplaceWarning, stacklevel=3
)
if "slice" in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = (
"replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
# sys.getrefcount is CPython specific and not on PyPy.
if "refcount" in warns and hasattr(sys, "getrefcount"):
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = (
"replaced column '{}' and the number of references "
"to the column changed.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if "attributes" in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = "replaced column '{}' and column attributes {} changed.".format(
name, changed_attrs
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
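# Illustrative sketch (comment only, not executed; assumes the default
# conf.replace_inplace=False) of enabling the 'always' warning class:
#
#     with conf.set_temp('replace_warnings', ['always']):
#         t = Table({'a': [1, 2]})
#         t['a'] = [3, 4]    # emits TableReplaceWarning("replaced column 'a'")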
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f"column name {name} is not in the table")
if self[name].info.indices:
raise ValueError("cannot replace a table index column")
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError("length of new column must match table length")
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
`~astropy.table.Row` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (defaults to all columns if no names are provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f"{name} is not a valid column name")
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f"columns {invalid_names} do not exist")
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using ``remove_column()``.
"""
for name in self._set_of_names_in_colnames(names):
del self.columns[name]
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, "utf-8"))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in (
col.info.attr_names - col.info._attrs_no_copy - {"dtype"}
):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
self._convert_string_dtype("S", "U", np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype("U", "S", np.char.encode)
def keep_columns(self, names):
"""
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
"""
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
del self.columns[colname]
def rename_column(self, name, new_name):
"""
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
"""
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
"""
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
"""
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError(
"input 'new_names' must be a tuple or a list of column names"
)
if len(names) != len(new_names):
raise ValueError(
"input 'names' and 'new_names' list arguments must be the same length"
)
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError(
"right hand side must be a sequence of values with "
"the same length as the number of selected columns"
)
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError(
f"Index {index} is out of bounds for table with length {N}"
)
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError("keys in mask should match keys in vals")
if vals and any(name not in colnames for name in vals):
raise ValueError("Keys in vals must all be valid column names")
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, "dtype"):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError("Mismatch between number of vals and columns")
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError("Mismatch between number of masks and columns")
else:
mask = [False] * len(self.columns)
else:
raise TypeError("Vals must be an iterable or mapping or None")
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if (
mask_
and isinstance(col, Column)
and not isinstance(col, MaskedColumn)
):
col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError(
"Incorrect length for column {} after inserting {}"
" (expected {}, got {})".format(name, val, len(newcol), N + 1)
)
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, "mask"):
newcol[index] = np.ma.masked
else:
raise TypeError(
"mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name)
)
columns[name] = newcol
except Exception as err:
raise ValueError(
"Unable to insert row because of exception in column '{}':\n{}".format(
name, err
)
) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
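# Illustrative sketch (comment only, not executed): inserting a partial row
# into a masked table masks the unsupplied values; ``t`` is hypothetical:
#
#     t = Table({'a': [1, 3], 'b': [10, 30]}, masked=True)
#     t.insert_row(1, {'a': 2})
#     t['b'].mask.tolist()    # -> [False, True, False] ('b' was not supplied)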
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts="silent")
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int or dict
    Number of decimals to round the columns to. If a dict is given, its
    keys are column names and its values are the number of decimals to
    round that column to; columns not present in the dict are left
    unchanged.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
"""
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
"""
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, "_groups"):
out._groups = groups.TableGroups(
out, indices=self._groups._indices, keys=self._groups._keys
)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError("cannot compare tables with different column names")
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
eq = self[name] == other[name]
if (
warns
and issubclass(warns[-1].category, FutureWarning)
and "elementwise comparison failed" in str(warns[-1].message)
):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f"unable to compare column {name}") from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (
isinstance(eq, np.ndarray)
and eq.dtype is np.dtype("bool")
and len(eq) == len(self)
):
raise TypeError(
f"comparison for column {name} returned {eq} "
"instead of the expected boolean ndarray"
)
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``.
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
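Examples
--------
A minimal sketch (assumes a table ``t`` with a column 'name' already
exists)::
    >>> tg = t.group_by('name')              # doctest: +SKIP
    >>> for group in tg.groups:              # doctest: +SKIP
    ...     print(len(group))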
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance.
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError(
"index must be None, False, True or a table column name"
)
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.time import TimeBase, TimeDelta
from . import serialize
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = (
col_copy(col, copy_indices=False) if col.info.indices else col
)
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype("timedelta64[ns]")
nat = np.timedelta64("NaT")
else:
new_col = col.datetime64.copy()
nat = np.datetime64("NaT")
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
# fmt: off
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)'
)
# fmt: on
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, "isnative", True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder("=")
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ["i", "u"]:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace("i", "I").replace("u", "U")
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to"
f" {out[name].dtype}",
TableReplaceWarning,
stacklevel=3,
)
elif column.dtype.kind not in ["f", "c"]:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs["index"] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance.
In addition to converting generic numeric or string columns, this supports
conversion of pandas datetime and timedelta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
units : dict, optional
    A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or "index"
while index_name in names:
index_name = "_" + index_name + "_"
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f"`units` contains additional columns: {not_found}")
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ["u", "i"] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(
data=data, name=name, mask=mask, unit=unit, copy=False
)
continue
if data.dtype.kind == "O":
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b""
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == "M":
from astropy.time import Time
out[name] = Time(data, format="datetime64")
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = "isot"
# Numpy timedelta64
elif data.dtype.kind == "m":
from astropy.time import TimeDelta
data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format="sec")
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
For more information see:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
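Examples
--------
A minimal sketch of the ``Quantity`` conversion described above (the
column name and values are illustrative only)::
    >>> import astropy.units as u
    >>> from astropy.table import QTable, Column
    >>> t = QTable([Column([1.0, 2.0], name='dist', unit=u.m)])
    >>> type(t['dist']).__name__
    'Quantity'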
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, "unit", None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(
f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning,
)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
4c45469187dae8a197983428071c7370a98560c00d11e1e7e03f137d242cc649
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
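A minimal sketch of this pattern in use (illustrative only; assumes 1-D data
arrays ``x`` and ``y`` already exist)::
    >>> from astropy.modeling import models, fitting
    >>> fitter = fitting.LinearLSQFitter()
    >>> fitted_poly = fitter(models.Polynomial1D(degree=2), x, y)  # doctest: +SKIP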
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import (
SplineExactKnotsFitter,
SplineInterpolateFitter,
SplineSmoothingFitter,
SplineSplrepFitter,
)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = [
"LinearLSQFitter",
"LevMarLSQFitter",
"TRFLSQFitter",
"DogBoxLSQFitter",
"LMLSQFitter",
"FittingWithOutlierRemoval",
"SLSQPLSQFitter",
"SimplexLSQFitter",
"JointFitter",
"Fitter",
"ModelLinearityError",
"ModelsError",
"SplineExactKnotsFitter",
"SplineInterpolateFitter",
"SplineSmoothingFitter",
"SplineSplrepFitter",
]
# Statistic functions implemented in `astropy.modeling.statistic.py`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers.py`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
Error raised when fitting encounters a non-finite value.
"""
class Covariance:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(
params[1]
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
class StandardDeviations:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
# Sometimes scipy lstsq returns nonsensical negative values on the
# diagonal of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
class ModelsError(Exception):
"""Base class for model exceptions."""
class ModelLinearityError(ModelsError):
"""Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith("_"):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
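A minimal sketch of how this decorator is applied (``MyFitter`` is a
hypothetical fitter class, shown only to illustrate the decorated
``__call__`` signature)::
    >>> class MyFitter(metaclass=_FitterMeta):        # doctest: +SKIP
    ...     @fitter_unit_support
    ...     def __call__(self, model, x, y, z=None, **kwargs):
    ...         ...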
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
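Examples
--------
A minimal subclass sketch (``Simplex`` and ``leastsquare`` are the
optimizer and statistic already imported in this module; the
``__call__`` body is omitted)::
    >>> class MySimplexFitter(Fitter):                 # doctest: +SKIP
    ...     supported_constraints = ['bounds', 'fixed']
    ...     def __init__(self):
    ...         super().__init__(optimizer=Simplex, statistic=leastsquare)
    ...     def __call__(self, model, x, y, **kwargs):
    ...         ...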
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific to
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
When available, calculate and set the parameter covariance matrix
(model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
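Examples
--------
A minimal sketch of a linear fit (data values are illustrative only)::
    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(0, 1, 10)
    >>> y = 2 * x + 1
    >>> fitter = fitting.LinearLSQFitter()
    >>> fitted_line = fitter(models.Linear1D(), x, y)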
"""
supported_constraints = ["fixed"]
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {
"residuals": None,
"rank": None,
"singular_values": None,
"params": None,
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None):
"""
Calculate the parameter covariance matrix and standard deviations
and set `cov_matrix` and `stds` attributes.
"""
x_dot_x_prime = np.dot(a.T, a)
masked = hasattr(y, "mask")
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1 / (xx.count() - n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append(
(1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2)
)
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn(
"Calculation of fitting uncertainties "
"for 2D models with masked values not "
"currently supported.\n",
AstropyUserWarning,
)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1 / (len(xx) - n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append(
[(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)]
)
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, "domain") and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, "window") and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, "x_domain") and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, "y_domain") and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, "x_window") and model.x_window is None:
model.x_window = [-1.0, 1.0]
if hasattr(model, "y_window") and model.y_window is None:
model.y_window = [-1.0, 1.0]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError(
"Model is not linear in parameters, "
"linear fit methods should not be used."
)
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(
x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis
)
n_fixed = sum(model_copy.fixed.values())
# This is also done by _convert_inputs, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if n_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [
idx
for idx in range(len(model_copy.param_names))
if idx not in fitparam_indices
]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray(
[
getattr(model_copy, model_copy.param_names[idx]).value
for idx in fixparam_indices
]
)
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x,
weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "domain"):
x = self._map_domain_window(model_copy, x)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x
)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x,
y,
weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "x_domain"):
x, y = self._map_domain_window(model_copy, x, y)
if n_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x, y=y
)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(
f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y"
)
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if n_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError(
"Found NaNs in the coefficient matrix, which "
"should not happen and would crash the lapack "
"routine. Maybe check that weights are not null."
)
a = None  # needed for calculating covariance
if (masked and len(model_copy) > 1) or (
weights is not None and weights.ndim > 1
):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(
model_lhs, model_rhs, rcond
)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond)
self.fit_info["residuals"] = resids
self.fit_info["rank"] = rank
self.fit_info["singular_values"] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info["params"] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (
hasattr(model_copy, "_order")
and len(model_copy) == 1
and rank < (model_copy._order - n_fixed)
):
warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(
model_copy, a * scl, len(lacoef), x, y, z, resids
)
model_copy.sync_constraints = True
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
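Examples
--------
A minimal sketch combining `~astropy.stats.sigma_clip` with a linear
fitter (data values are illustrative only)::
    >>> import numpy as np
    >>> from astropy.stats import sigma_clip
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(0, 1, 20)
    >>> y = 3 * x + 0.5
    >>> y[7] = 10.0  # simulate an outlier
    >>> fit = fitting.FittingWithOutlierRemoval(
    ...     fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0)
    >>> fitted_line, outlier_mask = fit(models.Linear1D(), x, y)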
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": n}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters.
Parameters
----------
calc_uncertainties : bool
If True, the covariance matrix will be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for
each parameter while fitting, via a simple min/max condition.
Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2:-1]) - meas)
else:
value = np.ravel(weights * (model(*args[2:-1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before "
"fitting to avoid this error."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] = cov_x * sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
x, y, z : ndarrays
x, y, and z with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], z[mask]
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z = self._filter_non_finite(x, y, z)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
One additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {
"nfev": None,
"fvec": None,
"fjac": None,
"ipvt": None,
"qtf": None,
"message": None,
"ierr": None,
"param_jac": None,
"param_cov": None,
}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function,
init_values,
args=farg,
Dfun=dfunc,
col_deriv=model.col_fit_deriv,
maxfev=maxiter,
epsfcn=epsilon,
xtol=acc,
full_output=True,
)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info["cov_x"] = cov_x
self.fit_info["message"] = mess
self.fit_info["ierr"] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return init_values, fitparams, cov_x
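# A minimal usage sketch for LevMarLSQFitter (assuming `Gaussian1D` from
# `astropy.modeling.models` and user-supplied ``x``/``y`` data):
#
# >>> from astropy.modeling import models
# >>> fitter = LevMarLSQFitter(calc_uncertainties=True)
# >>> g_init = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
# >>> g_fit = fitter(g_init, x, y)
# >>> fitter.fit_info["param_cov"]       # parameter covariance matrix
#
# With ``calc_uncertainties=True`` the fitted model also gains ``cov_matrix``
# and ``stds`` attributes (see ``_add_fitting_uncertainties`` above).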
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
- Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
'dogbox' : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter during fitting via a simple min/max clipping condition. A True
setting replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(params, model, weights, x, y, z)
)
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note, if use_min_max_bounds is True we are defaulting to enforcing bounds
# using the old method employed by LevMarLSQFitter, this is different
# from the method that optimize.least_squares employs to enforce bounds
# thus we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter during fitting via a simple min/max clipping condition. A True
setting replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced for each
parameter during fitting via a simple min/max clipping condition. A True
setting replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False):
super().__init__("lm", calc_uncertainties, True)
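# The three `scipy.optimize.least_squares`-backed fitters above share a
# common call pattern; a rough sketch (``x``/``y`` assumed to be user data):
#
# >>> fitter = TRFLSQFitter()            # or DogBoxLSQFitter() / LMLSQFitter()
# >>> fitted = fitter(model, x, y)
# >>> fitter.fit_info.success            # `scipy.optimize.OptimizeResult`
#
# TRFLSQFitter and DogBoxLSQFitter pass parameter ``bounds`` through to
# `scipy.optimize.least_squares`, while LMLSQFitter (like LevMarLSQFitter)
# relies on the min/max clipping applied in ``fitter_to_model_params`` below.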
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
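# Both optimizer-based fitters above are called the same way; a minimal
# sketch with user-supplied ``x``/``y`` data:
#
# >>> fitter = SimplexLSQFitter()        # or SLSQPLSQFitter()
# >>> fitted = fitter(model, x, y, maxiter=200)
# >>> fitter.fit_info                    # optimizer-specific information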
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]["slice"]
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[: model.n_inputs + 1]
del lstsqargs[: model.n_inputs + 1]
# extract this model's separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError(f"Expected >1 models, {len(self.models)} is given")
if len(self.jointparams.keys()) < 2:
raise TypeError(
"At least two parameters are expected, "
f"{len(self.jointparams.keys())} is given"
)
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(
f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected"
)
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(
f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided"
)
self.fitparams[:], _ = optimize.leastsq(
self.objective_function, self.fitparams, args=args
)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
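# A minimal JointFitter sketch: two Gaussian1D models fit to two data sets
# while sharing a common ``stddev``. The ``x1``/``y1``/``x2``/``y2`` arrays
# are assumed user data; the call takes the coordinates of every model in
# sequence, as checked in ``__call__`` above:
#
# >>> from astropy.modeling import models
# >>> g1 = models.Gaussian1D(1, 0, 0.5)
# >>> g2 = models.Gaussian1D(2, 3, 0.5)
# >>> jf = JointFitter([g1, g2], {g1: ["stddev"], g2: ["stddev"]}, [0.5])
# >>> jf(x1, y1, x2, y2)
#
# Afterwards ``g1.stddev`` and ``g2.stddev`` hold the jointly-fitted value.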
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
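# For reference, the rollaxis above means that a set of 1-D models expects
# the dependent variable with one column per model (the `numpy.linalg.lstsq`
# convention noted in the comment); a rough illustration:
#
# >>> x = np.arange(5)
# >>> y = np.stack([2 * x, 3 * x])             # shape (2, 5), model_set_axis=0
# >>> _convert_input(x, y, n_models=2)[1].shape
# (5, 2)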
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
use_min_max_bounds : bool
If True, the parameter bounds set on the model will be enforced on each
parameter that has bounds.
Default: True
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point).
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated("5.1", "private method: _fitter_to_model_params has been made public now")
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
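# A small illustration of the fixed/tied stripping above (assuming
# `Gaussian1D` from `astropy.modeling.models`):
#
# >>> from astropy.modeling import models
# >>> g = models.Gaussian1D(amplitude=1, mean=2, stddev=3,
# ...                       fixed={"mean": True})
# >>> params, indices, bounds = model_to_fit_params(g)
# >>> params       # only the free parameters remain
# array([1., 3.])
# >>> indices      # their indices within model.param_names
# [0, 2]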
@deprecated("5.1", "private method: _model_to_fit_params has been made public now")
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
# for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requiring it
to be merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not inspect.isclass(entry_point):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
)
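# For reference, a third-party package would typically expose a custom
# fitter through the "astropy.modeling" entry-point group; the project and
# module names below are purely illustrative:
#
#   # pyproject.toml of a hypothetical package
#   [project.entry-points."astropy.modeling"]
#   MyFitter = "example_fitters.fitting:MyFitter"
#
# On import, ``_populate_ep`` below loads each entry point and, provided it
# subclasses `Fitter`, injects it into this module's namespace.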
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, "select"):
populate_entry_points(ep.select(group="astropy.modeling"))
else:
populate_entry_points(ep.get("astropy.modeling", []))
_populate_ep()
|
598830b747a2185a4d845c5dcbffcc173173c29cec6ebf5c90568fb57ce1a2ed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError,
check_broadcast,
find_current_module,
indent,
isiterable,
metadata,
sharedmethod,
)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict,
_ConstraintsDict,
_SpecialOperatorsDict,
combine_labels,
get_inputs_and_params,
make_binary_operator_eval,
)
__all__ = [
"Model",
"FittableModel",
"Fittable1DModel",
"Fittable2DModel",
"CompoundModel",
"fix_inputs",
"custom_model",
"ModelDefinitionError",
"bind_bounding_box",
"bind_compound_bounding_box",
]
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
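# The operator methods built from _model_oper (installed on every Model
# subclass in ``_ModelMeta.__new__`` below) are what let ordinary arithmetic
# on model instances produce a CompoundModel; a minimal sketch using models
# from `astropy.modeling.models`:
#
# >>> from astropy.modeling import models
# >>> m = models.Gaussian1D(1, 0, 0.2) + models.Const1D(0.5)   # '+'
# >>> p = models.Shift(1) | models.Scale(2)                    # '|' chains models
# >>> isinstance(m, CompoundModel)
# True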
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if "_is_dynamic" not in members:
members["_is_dynamic"] = mcls._is_dynamic
opermethods = [
("__add__", _model_oper("+")),
("__sub__", _model_oper("-")),
("__mul__", _model_oper("*")),
("__truediv__", _model_oper("/")),
("__pow__", _model_oper("**")),
("__or__", _model_oper("|")),
("__and__", _model_oper("&")),
("_fix_inputs", _model_oper("fix_inputs")),
]
members["_parameters_"] = {
k: v for k, v in members.items() if isinstance(v, Parameter)
}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members["_parameters_"])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, "_param_names"):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith("_abc_"):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ("__init__", "__call__"):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith("_") or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs")
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs")
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get("inverse")
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get("bounding_box")
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(
cls, bounding_box, _preserve_ignore=True
)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box."
)
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(
f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__}
)
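# For context, the "parameterized bounding box" case handled above arises
# when a model defines ``bounding_box`` as a method whose extra arguments
# all have defaults; a hypothetical model definition might look like:
#
# >>> class TopHat1D(Fittable1DModel):
# ...     width = Parameter(default=1)
# ...     @staticmethod
# ...     def evaluate(x, width):
# ...         return np.where(np.abs(x) < width / 2, 1.0, 0.0)
# ...     def bounding_box(self, factor=1.0):
# ...         return (-factor * self.width, factor * self.width)
#
# Instances can then be queried with ``model.bounding_box(factor=2)``.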
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, "__qualname__"):
wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}"
if (
"__call__" not in members
and "n_inputs" in members
and isinstance(members["n_inputs"], int)
and members["n_inputs"] > 0
):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ("self",)
kwargs = {
"model_set_axis": None,
"with_bounding_box": False,
"fill_value": np.nan,
"equivalencies": None,
"inputs_map": None,
}
new_call = make_function_with_signature(
__call__, args, kwargs, varargs="inputs", varkwargs="new_inputs"
)
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if (
"__init__" not in members
and not inspect.isabstract(cls)
and cls._parameters_
):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ("self",)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False, subok=True)
kwargs.append((param_name, default))
else:
args = ("self",) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs="kwargs"
)
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
_fix_inputs = _model_oper("fix_inputs")
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif inspect.isabstract(base) or base.__name__.startswith("_"):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
("Name", format_inheritance(cls)),
("N_inputs", cls.n_inputs),
("N_outputs", cls.n_outputs),
]
if cls.param_names:
default_keywords.append(("Fittable parameters", cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f"{keyword}: {value}")
return "\n".join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to indicate that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ineqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, "n_inputs") and isinstance(
self.__class__.n_inputs, property
):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, "n_outputs") and isinstance(
self.__class__.n_outputs, property
):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {
key: self._input_units_strict for key in self.inputs
}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {
key: self._input_units_allow_dimensionless for key in self.inputs
}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation.
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
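        Examples
        --------
        A minimal sketch (``Polynomial1D`` has no default bounding box):

        >>> from astropy.modeling.models import Polynomial1D
        >>> p = Polynomial1D(1)
        >>> print(p.get_bounding_box())
        None
        >>> p.bounding_box = (-1, 1)
        >>> bbox = p.get_bounding_box()  # now a ModelBoundingBox instance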
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation."""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""Perform basic validation of a single model input's shape.
        Checks that the shape has at least the minimum number of dimensions
        required for the given model_set_axis.
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
        Perform basic validation of model inputs: that they are mutually
        broadcastable and that they have the minimum dimensions for the given
        model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars."
)
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation."""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""Generic model evaluation routine.
Selects and evaluates model with or without bounding_box enforcement.
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs.
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
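        For example, enforcing a bounding box during evaluation fills points
        outside it with ``fill_value`` (a minimal sketch):

        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1, mean=0, stddev=1)
        >>> g.bounding_box = (-1, 1)
        >>> y = g(np.array([-2.0, 0.0, 2.0]), with_bounding_box=True)
        >>> bool(np.isnan(y[0])), float(y[1])
        (True, 1.0)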
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
# These are the keys that are always present as keyword arguments.
keys = [
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
]
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(
f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
)
elif n_all_args > self.n_inputs:
raise ValueError(
f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
)
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
        The index of the model set axis--that is, the axis of a parameter
        array that indicates which model in a model set each parameter value
        pertains to--as specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
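        For example, a set of two models stacked along axis 0:

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D([1, 2], [0, 0], [1, 1], n_models=2)
        >>> g.model_set_axis
        0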
"""
return self._model_set_axis
@property
def param_sets(self):
"""
        Return the parameters as a parameter set ("pset") array.
        This is a list with one item per parameter, where each item is an
        array of that parameter's values across all parameter sets, with the
        last axis associated with the parameter set.
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
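        For example (a minimal sketch):

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=2, mean=1, stddev=3)
        >>> g.parameters
        array([2., 1., 3.])
        >>> g.parameters = [4, 5, 6]
        >>> g.parameters
        array([4., 5., 6.])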
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]["slice"].start
stop = self._param_metrics[self.param_names[-1]]["slice"].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}"
)
self._array_to_parameters()
@property
def sync_constraints(self):
"""
        A boolean property that indicates whether or not accessing constraints
        automatically checks the constituent models' current values. It
        defaults to True on creation of a model, but for fitting purposes it
        should be set to False for performance reasons.
"""
if not hasattr(self, "_sync_constraints"):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError("sync_constraints only accepts True or False as values")
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
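        For example:

        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D()
        >>> g.mean.fixed = True   # freeze ``mean`` during fitting
        >>> g.fixed['mean']
        True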
"""
if not hasattr(self, "_fixed") or self.sync_constraints:
self._fixed = _ConstraintsDict(self, "fixed")
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, "_bounds") or self.sync_constraints:
self._bounds = _ConstraintsDict(self, "bounds")
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, "_tied") or self.sync_constraints:
self._tied = _ConstraintsDict(self, "tied")
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints["eqcons"]
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints["ineqcons"]
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
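        Examples
        --------
        A minimal sketch of overriding and restoring the inverse:

        >>> from astropy.modeling.models import Shift
        >>> s = Shift(2)
        >>> s.has_inverse()
        True
        >>> s.inverse = Shift(-2)   # user-supplied inverse
        >>> s.has_user_inverse
        True
        >>> del s.inverse           # fall back to the default inverse
        >>> s.has_user_inverse
        False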
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError(
"No analytical or user-supplied inverse transform "
"has been implemented for this model."
)
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse."
)
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
raise `NotImplementedError` for no bounding_box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model)."
)
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError("No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif isinstance(bounding_box, (CompoundBoundingBox, dict)):
cls = CompoundBoundingBox
elif isinstance(self._bounding_box, type) and issubclass(
self._bounding_box, ModelBoundingBox
):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError("The bounding_box for this model is not compound")
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [
p
for p in self.param_names
if (self.fixed[p] is False) and (self.tied[p] is False)
]
        if isinstance(cov, list):  # model set
param_stds = []
for c in cov:
param_stds.append(
[np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)]
)
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [
np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)
]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
"""A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f"model {self.__class__.__name__}"
)
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
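        Examples
        --------
        A minimal sketch using a model whose parameters carry units:

        >>> import numpy as np
        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=2 * u.m, stddev=1 * u.m)
        >>> x = np.linspace(0, 4, 5) * u.m
        >>> y = np.ones(5) * u.Jy
        >>> g_unitless = g.without_units_for_data(x=x, y=y)
        >>> g_unitless.amplitude.unit is None
        True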
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
of fitting inputs and outputs.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
        This method will force extra model evaluations, which may be
        computationally expensive. To avoid this, one can add a
        ``return_units`` property to the model; see
        :ref:`astropy:models_return_units`.
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {
out: getattr(values[index], "unit", dimensionless_unscaled)
for index, out in enumerate(self.outputs)
}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {
inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
for inp in self.inputs
if kwargs[inp] is not None
}
outputs_unit = {
out: getattr(kwargs[out], "unit", dimensionless_unscaled)
for out in self.outputs
if kwargs[out] is not None
}
parameter_units = self._parameter_units_for_data_units(
inputs_unit, outputs_unit
)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
return any(getattr(self, param).unit is not None for param in self.param_names)
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
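        A minimal sketch of rendering a model into an existing image (only the
        region covered by the default ``bounding_box`` is evaluated):

        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> g = Gaussian2D(amplitude=1, x_mean=25, y_mean=25,
        ...                x_stddev=2, y_stddev=2)
        >>> image = np.zeros((50, 50))
        >>> image = g.render(out=image)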
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
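        For example, a model whose parameters carry units reports the units it
        expects for its input:

        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=1 * u.Jy, mean=2 * u.m, stddev=1 * u.m)
        >>> g.input_units['x']
        Unit("m")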
"""
if hasattr(self, "_input_units"):
return self._input_units
elif hasattr(self.evaluate, "__annotations__"):
annotations = self.evaluate.__annotations__.copy()
annotations.pop("return", None)
if annotations:
                # If annotations are missing for any input this will raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, "_return_units"):
return self._return_units
elif hasattr(self.evaluate, "__annotations__"):
return self.evaluate.__annotations__.get("return", None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
            # Ensure that array scalars are always upgraded to 1-D arrays for
            # the sake of consistency with how parameters work. They will be
            # cast back to scalars at the end.
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
f"self input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} cannot be broadcast with parameter"
f" {param.name!r} of shape {param.shape!r}."
)
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
                # If there were no inputs then the broadcasts list is empty;
                # just add a None since there is no broadcasting of outputs and
                # inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
that particular axis from the shape and all preceding axes. Negative axis
        numbers are permitted, where the axis is relative to the last axis.
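        For example:

        >>> from astropy.modeling import Model
        >>> Model._remove_axes_from_shape((2, 3, 4), 1)
        (4,)
        >>> Model._remove_axes_from_shape((2, 3, 4), -1)
        (2, 3)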
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis + 1 :]
if axis >= len(shape):
axis = len(shape) - 1
shape = shape[axis + 1 :]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (
_input.shape[:model_set_axis_input]
+ _input.shape[model_set_axis_input + 1 :]
)
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(
input_shape,
self._remove_axes_from_shape(param.shape, model_set_axis_param),
)
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape"
f" {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}."
)
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(
param.shape, model_set_axis_param
)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (
_input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
)
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(
self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get("inputs_map", None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(
params, inputs, model_set_axis, **kwargs
)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# If a leaflist is provided that means this is in the context of
# a compound model and it is necessary to create the appropriate
# alias for the input coordinate name for the equivalencies dict
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
self.inputs, edict, self.input_units_equivalencies
)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit, equivalencies=input_units_equivalencies[input_name]
):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (
len(input_units_equivalencies) > 0
or self.input_units_strict[input_name]
):
inputs[i] = inputs[i].to(
input_unit,
equivalencies=input_units_equivalencies[input_name],
)
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
"could not be converted to "
"required dimensionless "
"input"
)
else:
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})"
)
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (
not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None
):
if np.any(inputs[i] != 0):
raise UnitsError(
f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})"
)
return inputs
def _process_output_units(self, inputs, outputs):
        inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs)
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(
Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)
)
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get("model_set_axis", None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(
outputs, broadcasted_shapes, model_set_axis
)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify input_units for model with existing input units"
)
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple(
(unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
)
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless,
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError(
"Cannot specify return_units for model "
"with existing output units"
)
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple(
(model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units)
)
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop("n_models", None)
if not (
n_models is None
or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})"
)
model_set_axis = kwargs.pop("model_set_axis", None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (
model_set_axis is False
or np.issubdtype(type(model_set_axis), np.integer)
):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r})."
)
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)"
)
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for"
f" parameter {param_name!r}"
)
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized"
f" parameter {kwarg!r}"
)
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension at least"
f" {min_ndim} for model_set_axis={model_set_axis} (the value"
f" given for {name!r} is only {param_ndim}-dimensional)"
)
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for"
f" {n_models} model sets. The length of axis"
f" {model_set_axis} must be the same for all input parameter"
" values"
)
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}"
)
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for"
f" parameter {param_name!r}"
)
param._unit = unit
param._set_unit(unit, force=True)
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (
param_shape[: model_set_axis + 1]
+ new_axes
+ param_shape[model_set_axis + 1 :]
)
self._param_metrics[name]["broadcast_shape"] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules."
)
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
        whether or not to return the raw parameter values (i.e. the values that
        are actually stored in the ``._parameters`` array, as opposed to the
        values displayed to users). In most cases these are one and the same,
        but there are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get("broadcast_shape")
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit, subok=True)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return "\n".join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
    This class provides an easier interface for defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
    This class provides an easier interface for defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (
lambda inputs, params: (
f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params)
),
f[1] + g[1],
f[2] + g[2],
)
BINARY_OPERATORS = {
"+": _make_arithmetic_operator(operator.add),
"-": _make_arithmetic_operator(operator.sub),
"*": _make_arithmetic_operator(operator.mul),
"/": _make_arithmetic_operator(operator.truediv),
"**": _make_arithmetic_operator(operator.pow),
"|": _composition_operator,
"&": _join_operator,
}
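# A brief sketch of how these operator factories compose: each operand is a
# tuple ``(evaluate_callable, n_inputs, n_outputs)``.  The arithmetic
# operators evaluate both operands on the same inputs and combine the
# corresponding outputs, "|" chains them (the left output feeds the right
# model), and "&" concatenates their inputs and outputs.  For example, with
# hypothetical operand tuples ``f`` and ``g``::
#
#     combined_eval, n_in, n_out = BINARY_OPERATORS["|"](f, g)
#     # combined_eval(inputs, params) == g[0](f[0](inputs, params), params)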
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
"""
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
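    Examples
    --------
    A minimal usage sketch, assuming two simple 1D models from
    `astropy.modeling.models`::
        >>> from astropy.modeling import models
        >>> compound = models.Gaussian1D(1, 0, 0.2) + models.Const1D(0.5)
        >>> print(f"{compound(0):.1f}")
        1.5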
"""
def __init__(self, op, left, right, name=None):
self.__dict__["_param_names"] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != "fix_inputs" and len(left) != len(right):
raise ValueError("Both operands must have equal values for n_models")
self._n_models = len(left)
if op != "fix_inputs" and (
(left.model_set_axis != right.model_set_axis) or left.model_set_axis
): # not False and not 0
raise ValueError(
"model_set_axis must be False or 0 and consistent for operands"
)
self._model_set_axis = left.model_set_axis
if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS:
if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs:
raise ModelDefinitionError(
"Both operands must match numbers of inputs and outputs"
)
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == "&":
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == "|":
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |:"
f" {left.name} (n_inputs={left.n_inputs},"
f" n_outputs={left.n_outputs}) and"
f" {right.name} (n_inputs={right.n_inputs},"
f" n_outputs={right.n_outputs}); n_outputs for the left-hand model"
" must match n_inputs for the right-hand model."
)
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == "fix_inputs":
if not isinstance(left, Model):
raise ValueError(
'First argument to "fix_inputs" must be an instance of '
"an astropy Model."
)
if not isinstance(right, dict):
raise ValueError(
'Expected a dictionary for second argument of "fix_inputs".'
)
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
"Substitution key integer value "
"not among possible input choices."
)
if key in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
"Substitution key string not among possible input choices."
)
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError(
"Duplicate specification of same input (index/name)."
)
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
raise ModelDefinitionError("Illegal operator: ", self.op)
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ("|", "+", "-"):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[: self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs]
elif op == "|" or op == "fix_inputs":
return None
else:
return args[: self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs : n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == "fix_inputs":
return None
if op == "&":
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :]
else:
return args[self.left.n_inputs + self.n_left_params :]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
        # We could do this with inserts, but rebuilding the list seems like the
        # simplest way.
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[: self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[: self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == "+":
return binary_operation(operator.add, leftval, rightval)
elif op == "-":
return binary_operation(operator.sub, leftval, rightval)
elif op == "*":
return binary_operation(operator.mul, leftval, rightval)
elif op == "/":
return binary_operation(operator.truediv, leftval, rightval)
elif op == "**":
return binary_operation(operator.pow, leftval, rightval)
elif op == "&":
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
raise ModelDefinitionError("Unrecognized operator {op}")
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == "fix_inputs":
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == "fix_inputs":
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
"""Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f"None_{nonecount}")
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
def both_inverses_exist(self):
"""
        Return `True` if both members of this compound model have inverses.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.",
AstropyDeprecationWarning,
)
try:
self.left.inverse
self.right.inverse
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
        # If equivalencies are provided, it is necessary to build a mapping
        # from the compound model's input names to the individual submodels'
        # input names and pass it as a keyword input, so that the equivalencies
        # can be matched to the correct inputs during model evaluation.
if "equivalencies" in kwargs:
# Restructure to be useful for the individual model lookup
kwargs["inputs_map"] = [
(value[0], (value[1], key)) for key, value in self.inputs_map().items()
]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""
No inputs should be used to determine input_shape when handling compound models.
"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs.
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != "fix_inputs":
if op != "&":
leftval = self.left(*args, **kw)
if op != "|":
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[: self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs :]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == "|":
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError(
"Keyword argument duplicates positional value supplied."
)
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
"""An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, "", tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters.
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == "__setstate__":
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError("Steps in slices not supported for compound models")
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError("index must be integer, slice, or model name string")
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, "name", None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(
f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}"
)
return found[0]
@property
def n_inputs(self):
"""The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
"""The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
    @property
    def ineqcons(self):
        return self._ineqcons
    @ineqcons.setter
    def ineqcons(self, value):
        self._ineqcons = value
def traverse_postorder(self, include_operator=False):
"""Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f"[{i}]"
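        # With the default leaf formatter this produces strings such as
        # "[0] + [1] * [2]", adding parentheses around sub-expressions whose
        # operator has lower precedence than that of their parent node.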
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (
isinstance(node.left, CompoundModel)
and OPERATOR_PRECEDENCE[node.left.op] < oper_order
):
left = f"({left})"
if (
isinstance(node.right, CompoundModel)
and OPERATOR_PRECEDENCE[node.right.op] < oper_order
):
right = f"({right})"
operands.append(" ".join((left, node.op, right)))
else:
left = f"(({left}),"
right = f"({right}))"
operands.append(" ".join((node.op[0], left, right)))
return "".join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
("Expression", expression),
("Components", "\n" + indent(components)),
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
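    # Inverting a composition reverses its order: (m1 | m2).inverse is
    # m2.inverse | m1.inverse, while for "&" the sub-model inverses are
    # joined side by side.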
@property
def inverse(self):
if self.op == "|":
return self.right.inverse | self.left.inverse
elif self.op == "&":
return self.left.inverse & self.right.inverse
else:
return NotImplemented
@property
def fittable(self):
"""Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper("+")
__sub__ = _model_oper("-")
__mul__ = _model_oper("*")
__truediv__ = _model_oper("/")
__pow__ = _model_oper("**")
__or__ = _model_oper("|")
__and__ = _model_oper("&")
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
        The parameter objects that the attributes point to are the same
        objects as in the constituent models; changes made to a parameter
        value in either are seen by both. For example, a parameter named
        ``amplitude`` on the first (index 0) submodel is exposed as
        ``amplitude_0`` on the compound model.
        Prior to calling this, none of the associated attributes will
        exist. This method must be called to make the model usable by
        fitting engines.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
        Map the names of the inputs to this `CompoundModel` to the inputs of the leaf models.
"""
inputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == "|":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[
self.right.inputs[i - len(self.left.inputs)]
]
else:
inputs_map[inp] = (
self.right,
self.right.inputs[i - len(self.left.inputs)],
)
elif self.op == "fix_inputs":
fixed_ind = list(self.right.keys())
ind = [
list(self.left.inputs).index(i) if isinstance(i, str) else i
for i in fixed_ind
]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(
input_units, output_units
)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {
key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None
}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {
key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()
}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {
key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None
}
def outputs_map(self):
"""
        Map the names of the outputs of this `CompoundModel` to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(
self.op, str
): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == "|":
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == "&":
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[
self.right.outputs[i - len(self.left.outputs)]
]
else:
outputs_map[out] = (
self.right,
self.right.outputs[i - len(self.left.outputs)],
)
elif self.op == "fix_inputs":
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
                    outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError("If no bounding_box is set, coords or out must be input.")
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError("inconsistent shape of the output.")
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError(
"the array and model must have the same number of dimensions."
)
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array(
[extract_array(c, sub_shape, pos) for c in coords]
)
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input out in "
"one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
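        Examples
        --------
        A minimal sketch, assuming two named models from
        `astropy.modeling.models`::
            >>> from astropy.modeling import models
            >>> m = models.Shift(1, name='shift') | models.Scale(2, name='scale')
            >>> m2 = m.replace_submodel('scale', models.Scale(3, name='rescale'))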
"""
submodels = [
m for m in self.traverse_postorder() if getattr(m, "name", None) == name
]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError(
"New and old models must have equal values for n_models"
)
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (
old_model.n_inputs != model.n_inputs
or old_model.n_outputs != model.n_outputs
):
raise ValueError(
"New model must match numbers of inputs and "
"outputs of existing model"
)
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(
branch.op, branch.left, branch.right, name=branch.name
)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
        Provides a work-around to properly set the sub-models and their
        respective parameters' units/values when using the
        ``without_units_for_data`` or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models combined with the ``*``
        or ``/`` operators, since in that case it is reasonable to mix the
        output units. It does this
by modifying the output units of each sub model by using the output
units of the other sub model so that we can apply the original function
and get the desired result.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == "*":
left_kwargs = {
out: kwargs[out] / right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: kwargs[out] / left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
else:
left_kwargs = {
out: kwargs[out] * right_units[out]
for out in self.left.outputs
if kwargs[out] is not None
}
right_kwargs = {
out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs
if kwargs[out] is not None
}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs["_left_kwargs"] = left[1]
left_kwargs["_right_kwargs"] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs["_left_kwargs"] = right[1]
right_kwargs["_right_kwargs"] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models combined with the ``*``
        or ``/`` operators, since in that case it is reasonable to mix the
        output units. In order to
do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data` passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ["*", "/"]:
left_kwargs = kwargs.pop("_left_kwargs")
right_kwargs = kwargs.pop("_right_kwargs")
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not).
"""
if getattr(model, "name", None) == name:
return []
try:
return ["left"] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ["right"] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
"""
Perform binary operation. Operands may be matching tuples of operands.
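    For example, with matching tuples the operation is applied element-wise::
        binary_operation(operator.add, (1, 2), (10, 20))   # -> (11, 22)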
"""
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1]) for item in zip(left, right))
return binoperator(left, right)
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
"""Traverse a tree noting each node by a key.
The key indicates all the left/right choices necessary to reach that node.
Each key will reference a tuple that contains:
- reference to the compound model for that node.
- left most index contained within that subtree
(relative to all indices for the whole tree)
- right most index contained within that subtree
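    For example, for a tree built from ``(m1 + m2) | m3`` the subtree for
    ``m1 + m2`` is keyed ``"l"`` and spans leaf indices 0 through 1, while
    the root node is keyed ``""`` and spans indices 0 through 2.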
"""
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, "isleaf"):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist)
make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist)
rightmostind = len(leaflist) - 1
tdict[nodepath] = (tree, leftmostind, rightmostind)
_ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)]
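# A lower index in _ORDER_OF_OPERATORS corresponds to lower binding
# precedence, so "**" binds most tightly while "|" and "fix_inputs" bind
# least tightly; _format_expression uses these values to decide where
# parentheses are required.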
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that one or more of the
model input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
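    The key may equivalently be the input name, so ``fix_inputs(g, {'x': 2.5})``
    produces the same result.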
"""
model = CompoundModel("fix_inputs", modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(
modelinstance, bounding_boxes, selector_args
)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
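    Examples
    --------
    A minimal sketch, assuming a simple 1D model from
    `astropy.modeling.models`::
        >>> from astropy.modeling import models
        >>> from astropy.modeling.core import bind_bounding_box
        >>> model = models.Gaussian1D()
        >>> bind_bounding_box(model, (-1, 1))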
"""
modelinstance.bounding_box = ModelBoundingBox.validate(
modelinstance, bounding_box, ignored=ignored, order=order
)
def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is dimensions of the model (the number of
independent variable in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any)."
)
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ["n_outputs"]
settable = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None
]
properties = [
attr
for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special
]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(
f"Parameter '{param.name}' cannot be a model property: {properties}."
)
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable object"
)
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other callable object"
)
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params):
raise ModelDefinitionError(
"derivative function should accept same number of parameters as func."
)
params = {
param: Parameter(param, default=default) for param, default in params.items()
}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = "__main__"
members = {
"__module__": str(modname),
"__doc__": func.__doc__,
"n_inputs": len(inputs),
"n_outputs": special_params.pop("n_outputs", 1),
"evaluate": staticmethod(func),
"_settable_properties": settable_params,
}
if fit_deriv is not None:
members["fit_deriv"] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
        Coordinate arrays mapping to ``arr``, such that each coordinate
        array has the same shape as ``arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
if (coords is None) & (arr is None) & (bbox is None):
raise ValueError("If no bounding_box is set, coords or arr must be input.")
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError(
"number of array dimensions inconsistent with number of model inputs."
)
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError(
"coordinate length inconsistent with the number of model inputs."
)
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError("coordinate shape inconsistent with the array shape.")
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = (
np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
.astype(int)
.T
)
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError(
"The `bounding_box` is larger than the input"
" arr in one or more dimensions. Set "
"`model.bounding_box = None`."
)
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
model's inverse. This is to handle cases where user provided inverse
functions are not compatible within an expression.
For example::
compound_model.inverse = hide_inverse(m1) + m2 + m3
    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = [
"AiryDisk2D",
"Moffat1D",
"Moffat2D",
"Box1D",
"Box2D",
"Const1D",
"Const2D",
"Ellipse2D",
"Disk2D",
"Gaussian1D",
"Gaussian2D",
"Linear1D",
"Lorentz1D",
"RickerWavelet1D",
"RickerWavelet2D",
"RedshiftScaleFactor",
"Multiply",
"Planar2D",
"Scale",
"Sersic1D",
"Sersic2D",
"Shift",
"Sine1D",
"Cosine1D",
"Tangent1D",
"ArcSine1D",
"ArcCosine1D",
"ArcTangent1D",
"Trapezoid1D",
"TrapezoidDisk2D",
"Ring2D",
"Voigt1D",
"KingProjectedAnalytic1D",
"Exponential1D",
"Logarithmic1D",
]
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
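# Numerically GAUSSIAN_SIGMA_TO_FWHM is approximately 2.3548, i.e.
# FWHM ~= 2.3548 * stddev.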
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Gaussian"
)
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian",
)
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev**2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mean": inputs_unit[self.inputs[0]],
"stddev": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
self,
amplitude=amplitude.default,
x_mean=x_mean.default,
y_mean=y_mean.default,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
**kwargs,
):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev/theta"
)
            # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
            x_vec = eig_vecs[:, 0]
            theta = np.arctan2(x_vec[1], x_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault("bounds", {})
kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
**kwargs,
)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``.
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return (
(self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx),
)
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function."""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(
-((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2))
)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters."""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2.0 * theta)
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xstd3 = x_stddev**3
ystd3 = y_stddev**3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff**2
ydiff2 = ydiff**2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
dg_dx_stddev = g * (
-(
da_dx_stddev * xdiff2
+ db_dx_stddev * xdiff * ydiff
+ dc_dx_stddev * ydiff2
)
)
dg_dy_stddev = g * (
-(
da_dy_stddev * xdiff2
+ db_dy_stddev * xdiff * ydiff
+ dc_dy_stddev * ydiff2
)
)
dg_dtheta = g * (
-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
)
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_mean": inputs_unit[self.inputs[0]],
"y_mean": inputs_unit[self.inputs[0]],
"x_stddev": inputs_unit[self.inputs[0]],
"y_stddev": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
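
    Examples
    --------
    A short sketch; the offset value is illustrative:

    >>> from astropy.modeling.models import Shift
    >>> s = Shift(offset=2.)
    >>> y = s(3.)          # 3 + 2 == 5.0
    >>> x = s.inverse(y)   # the inverse subtracts the offset, recovering 3.0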
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
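
    Examples
    --------
    A short sketch; the factor is illustrative:

    >>> from astropy.modeling.models import Scale
    >>> sc = Scale(factor=3.)
    >>> y = sc(2.)           # 3 * 2 == 6.0
    >>> x = sc.inverse(y)    # the inverse divides by the factor, recovering 2.0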
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
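
    Examples
    --------
    A short sketch; the factor is illustrative (a |Quantity| factor would
    keep its unit, unlike `Scale`):

    >>> from astropy.modeling.models import Multiply
    >>> m = Multiply(factor=2.)
    >>> y = m(3.)            # 2 * 3 == 6.0
    >>> x = m.inverse(y)     # the inverse divides by the factor, recovering 3.0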
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function."""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
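
    Examples
    --------
    A short sketch; the redshift and wavelength values are illustrative:

    >>> from astropy.modeling.models import RedshiftScaleFactor
    >>> m = RedshiftScaleFactor(z=1.5)
    >>> w_obs = m(4000.)           # 4000 * (1 + 1.5) == 10000.0
    >>> w_rest = m.inverse(w_obs)  # the inverse scales back to 4000.0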
"""
z = Parameter(description="Redshift", default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function."""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative."""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model."""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()
)
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
        plt.text(.25, 300, 'n=9')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)
)
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_eff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1.0 / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": inputs_unit[self.inputs[0]] ** -1,
"amplitude": outputs_unit[self.outputs[0]],
}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative."""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (
TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine."""
return ArcSine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative."""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = -(
TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine."""
return ArcCosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
    Note that the tangent function is undefined for inputs of the form
    pi/2 + n*pi for all integers n. Thus the default bounding box has been
    restricted to:

    .. math:: [(-1/4 - p)/f, (1/4 - p)/f]

    which corresponds to a single continuous branch of the tangent function.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models.
"""
@property
def input_units(self):
if self.amplitude.unit is None:
return None
return {self.inputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": outputs_unit[self.outputs[0]] ** -1,
"amplitude": inputs_unit[self.inputs[0]],
}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
    .. math:: f(x) = \\left(\\frac{\\arcsin(x / A)}{2 \\pi} - p\\right) / f

    The arcsin function used by this model only accepts inputs in [-A, A];
    outside that range a runtime warning is raised and the result is NaN.
    The default bounding_box is set to this interval, so it is recommended
    that this model always be evaluated with the ``with_bounding_box=True``
    option.
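
    For example (a sketch; the amplitude value is illustrative)::

        from astropy.modeling.models import ArcSine1D

        arc = ArcSine1D(amplitude=2.)        # valid inputs lie in [-2, 2]
        y = arc(5., with_bounding_box=True)  # out-of-range inputs are replaced by NaN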
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine."""
return Sine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
    .. math:: f(x) = \\left(\\frac{\\arccos(x / A)}{2 \\pi} - p\\right) / f

    The arccos function used by this model only accepts inputs in [-A, A];
    outside that range a runtime warning is raised and the result is NaN.
    The default bounding_box is set to this interval, so it is recommended
    that this model always be evaluated with the ``with_bounding_box=True``
    option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative."""
d_amplitude = x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine."""
return Cosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
    .. math:: f(x) = \\left(\\frac{\\arctan(x / A)}{2 \\pi} - p\\right) / f
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
        arc_tan = np.arctan(argument) / TWOPI
        return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent."""
return Tangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
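
    Examples
    --------
    A short sketch; the slope and intercept are illustrative:

    >>> from astropy.modeling.models import Linear1D
    >>> line = Linear1D(slope=2., intercept=1.)
    >>> y = line(3.)          # 2 * 3 + 1 == 7.0
    >>> x = line.inverse(y)   # the inverse line recovers 3.0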
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
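
    Examples
    --------
    A short sketch; the slopes and intercept are illustrative:

    >>> from astropy.modeling.models import Planar2D
    >>> plane = Planar2D(slope_x=1., slope_y=2., intercept=3.)
    >>> z = plane(1., 1.)     # 1*1 + 2*1 + 3 == 6.0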
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2)
d_x_0 = (
amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2)
)
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
    method : str, optional
        Algorithm for computing the complex error function; one of
        'Humlicek2' (the default; fast, with relative accuracy generally
        better than 3e-5) or 'Scipy' (also accepted as 'wofz'; requires
        ``scipy``, almost as fast and used as the accuracy reference).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided
consistently with compatible units or as unitless numbers.
Voigt function is calculated as real part of the complex error function computed from either
Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or
`~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0, description="Position of the peak")
amplitude_L = Parameter(default=1, description="The Lorentzian amplitude")
fwhm_L = Parameter(
default=2 / np.pi, description="The Lorentzian full width at half maximum"
)
fwhm_G = Parameter(
default=np.log(2), description="The Gaussian full width at half maximum"
)
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(
self,
x_0=x_0.default,
amplitude_L=amplitude_L.default,
fwhm_L=fwhm_L.default,
fwhm_G=fwhm_G.default,
method="humlicek2",
**kwargs,
):
if str(method).lower() in ("wofz", "scipy"):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == "humlicek2":
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(
f"Not a valid method for Voigt1D Faddeeva function: {method}."
)
self.method = self._faddeeva.__name__
super().__init__(
x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs
)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`.
"""
if z.shape == self._last_z.shape and np.allclose(
z, self._last_z, rtol=1.0e-14, atol=1.0e-15
):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""
Derivative of the one dimensional Voigt function with respect to parameters.
"""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [
-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G,
]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm_L": inputs_unit[self.inputs[0]],
"fwhm_G": inputs_unit[self.inputs[0]],
"amplitude_L": outputs_unit[self.outputs[0]],
}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z = x + iy) combining Humlicek's rational approximations.
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
# fmt: off
AA = np.array(
[
+46236.3358828121, -147726.58393079657j,
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j,
]
) # 1j/sqrt(pi) to the 12. digit
bb = np.array(
[
+7918.06640624997,
-126689.0625,
+295607.8125,
-236486.25,
+84459.375,
-15015.0,
+1365.0,
-60.0,
+1.0,
]
)
# fmt: on
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
# fmt: off
# Recursive algorithms for the polynomials in Z with coefficients AA, bb
# numer = 0.0
# for A in AA[::-1]:
# numer = numer * Z + A
# Explicitly unrolled above loop for speed
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
# denom = 0.0
# for b in bb[::-1]:
# denom = denom * ZZ + b
# Explicitly unrolled above loop for speed
denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ +
bb[2])*ZZ + bb[1])*ZZ + bb[0]
# fmt: on
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
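
    Examples
    --------
    A short sketch; the amplitude is illustrative:

    >>> from astropy.modeling.models import Const2D
    >>> c = Const2D(amplitude=3.)
    >>> val = c(1., 2.)       # 3.0, independent of the (x, y) position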
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
        e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree,
                              edgecolor='red', facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse", mag=True)
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"a": inputs_unit[self.inputs[0]],
"b": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
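
    Examples
    --------
    A short sketch; the radius and test points are illustrative:

    >>> from astropy.modeling.models import Disk2D
    >>> disk = Disk2D(amplitude=1., x_0=0., y_0=0., R_0=2.)
    >>> inside = disk(1., 1.)    # 1.0, since r < R_0
    >>> outside = disk(3., 0.)   # 0.0, since r > R_0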
"""
amplitude = Parameter(default=1, description="Value of disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0**2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return (
(self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0),
)
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
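
    Examples
    --------
    A short sketch; the geometry is illustrative (the outer edge may be
    given either as ``width`` or as ``r_out``):

    >>> from astropy.modeling.models import Ring2D
    >>> ring = Ring2D(amplitude=1., x_0=0., y_0=0., r_in=2., r_out=3.)
    >>> on_ring = ring(2.5, 0.)   # 1.0, inside the annulus
    >>> in_hole = ring(1., 0.)    # 0.0, inside the inner radius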
"""
amplitude = Parameter(default=1, description="Value of the disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
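
    Examples
    --------
    A short sketch; the box geometry and test points are illustrative:

    >>> from astropy.modeling.models import Box2D
    >>> box = Box2D(amplitude=1., x_0=0., y_0=0., x_width=2., y_width=4.)
    >>> inside = box(0.5, 1.5)   # 1.0
    >>> outside = box(2., 0.)    # 0.0, x lies outside the box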
"""
amplitude = Parameter(default=1, description="Amplitude", mag=True)
x_0 = Parameter(
default=0, description="X position of the center of the box function"
)
y_0 = Parameter(
default=0, description="Y position of the center of the box function"
)
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function."""
x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0)
y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[1]],
"x_width": inputs_unit[self.inputs[0]],
"y_width": inputs_unit[self.inputs[1]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function."""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.0
x3 = x_0 + width / 2.0
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
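
    Examples
    --------
    A short sketch; the geometry is illustrative:

    >>> from astropy.modeling.models import TrapezoidDisk2D
    >>> trap = TrapezoidDisk2D(amplitude=2., x_0=0., y_0=0., R_0=1., slope=1.)
    >>> flat_top = trap(0.5, 0.)   # 2.0, on the constant inner disk
    >>> on_slope = trap(2., 0.)    # 1.0, halfway down the linear tail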
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(
default=1, description="Slope of tails of trapezoid in x direction"
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function."""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit["x"] != inputs_unit["y"]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
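Examples
--------
A minimal usage sketch; evaluation requires ``scipy`` and the parameter
values are illustrative only::
import numpy as np
from astropy.modeling.models import AiryDisk2D
# evaluate the Airy pattern on a regular grid
model = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=2)
x, y = np.meshgrid(np.linspace(-5, 5, 101), np.linspace(-5, 5, 101))
z = model(x, y)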
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
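Examples
--------
A minimal usage sketch; the parameter values are illustrative only::
import numpy as np
from astropy.modeling.models import Moffat2D
# evaluate the profile on a regular grid
model = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=2, alpha=3.5)
x, y = np.meshgrid(np.linspace(-5, 5, 101), np.linspace(-5, 5, 101))
z = model(x, y)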
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(
default=0, description="X position of the maximum of the Moffat model"
)
y_0 = Parameter(
default=0, description="Y position of the maximum of the Moffat model"
)
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg))
d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
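For reference, a short sketch of solving for :math:`b_n` with
`scipy.special.gammaincinv`, which mirrors what ``evaluate`` does
internally (the Sersic index value here is illustrative only)::
from scipy.special import gammaincinv
n = 4.0  # Sersic index
bn = gammaincinv(2.0 * n, 0.5)  # solves Gamma(2n) = 2 * gamma(2n, b_n)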
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2.0 * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_eff": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
It usually works for models with a concentration parameter (c = log10(r_t/r_c)) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor",
)
r_core = Parameter(
default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius"
)
r_tide = Parameter(
default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius"
)
@property
def concentration(self):
"""Concentration parameter of the king model."""
return np.log10(np.abs(self.r_tide / self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = (
amplitude
* r_core**2
* (
1 / np.sqrt(x**2 + r_core**2)
- 1 / np.sqrt(r_tide**2 + r_core**2)
)
** 2
)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.0
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = (
r_core**2
* (
1 / np.sqrt(x**2 + r_core**2)
- 1 / np.sqrt(r_tide**2 + r_core**2)
)
** 2
)
d_r_core = (
2
* amplitude
* r_core**2
* (
r_core / (r_core**2 + r_tide**2) ** (3 / 2)
- r_core / (r_core**2 + x**2) ** (3 / 2)
)
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
+ 2
* amplitude
* r_core
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
** 2
)
d_r_tide = (
2
* amplitude
* r_core**2
* r_tide
* (
1.0 / np.sqrt(r_core**2 + x**2)
- 1.0 / np.sqrt(r_core**2 + r_tide**2)
)
) / (r_core**2 + r_tide**2) ** (3 / 2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds] * 0
d_r_core[bounds] = d_r_core[bounds] * 0
d_r_tide[bounds] = d_r_tide[bounds] * 0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_core": inputs_unit[self.inputs[0]],
"r_tide": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
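Examples
--------
A minimal usage sketch evaluating :math:`f(x) = A \\ln(x / \\tau)`;
the parameter values are illustrative only::
import numpy as np
from astropy.modeling.models import Logarithmic1D
model = Logarithmic1D(amplitude=2, tau=0.5)
y = model(np.linspace(1, 10, 50))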
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
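Examples
--------
A minimal usage sketch evaluating :math:`f(x) = A e^{x / \\tau}`;
the parameter values are illustrative only::
import numpy as np
from astropy.modeling.models import Exponential1D
# a negative tau gives a decaying exponential
model = Exponential1D(amplitude=1, tau=-2)
y = model(np.linspace(0, 10, 50))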
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
"""tau cannot be 0."""
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
scale : float or `~astropy.units.Quantity` ['dimensionless']
Scale factor. If dimensionless, input units will be assumed
to be Hz and output units erg / (cm ** 2 * s * Hz * sr).
If not dimensionless, must be equivalent to either
erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
in which case the result will be returned in the requested units and
the scale will be stripped of units (with the float value applied).
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(
default=5000.0, min=0, unit=u.K, description="Blackbody temperature"
)
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz or wavelengths
# in AA (depending on the choice of output units controlled by units on scale
# and stored in self._output_units during init).
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {"x": u.spectral()}
# Store the native units returned by B_nu equation
_native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# Store the base native output units. If scale is not dimensionless, it
# must be equivalent to one of these. If equivalent to SLAM, then
# input_units will expect AA for 'x', otherwise Hz.
_native_output_units = {
"SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr),
"SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr),
}
def __init__(self, *args, **kwargs):
scale = kwargs.get("scale", None)
# Support scale with non-dimensionless unit by stripping the unit and
# storing as self._output_units.
if hasattr(scale, "unit") and not scale.unit.is_equivalent(
u.dimensionless_unscaled
):
output_units = scale.unit
if not output_units.is_equivalent(
self._native_units, u.spectral_density(1 * u.AA)
):
raise ValueError(
"scale units not dimensionless or in "
f"surface brightness: {output_units}"
)
kwargs["scale"] = scale.value
self._output_units = output_units
else:
self._output_units = self._native_units
return super().__init__(*args, **kwargs)
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which will depend on the
# units compatible with the expected output units.
if self._output_units.is_equivalent(self._native_output_units["SNU"]):
return {self.inputs[0]: u.Hz}
else:
# only other option is equivalent with SLAM
return {self.inputs[0]: u.AA}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
scale = self.scale.quantity.to(u.dimensionless_unscaled)
else:
scale = self.scale.value
# bolometric flux in the native units of the planck function
native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm**2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
"""
Drude model based on the behavior of electrons in materials (especially metals).
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
Model formula:
.. math:: f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Drude1D
fig, ax = plt.subplots()
# generate the curves and plot them
x = np.arange(7.5 , 12.5 , 0.1)
dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
ax.plot(x, dmodel(x))
ax.set_xlabel('x')
ax.set_ylabel('F(x)')
plt.show()
"""
amplitude = Parameter(default=1.0, description="Peak Value")
x_0 = Parameter(default=1.0, description="Position of the peak")
fwhm = Parameter(default=1.0, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""
One dimensional Drude model function.
"""
return (
amplitude
* ((fwhm / x_0) ** 2)
/ ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""
Drude1D model function derivatives.
"""
d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
d_x_0 = (
-2
* amplitude
* d_amplitude
* (
(1 / x_0)
+ d_amplitude
* (x_0**2 / fwhm**2)
* (
(-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
- (2 * fwhm**2 / x_0**3)
)
)
)
d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
@x_0.validator
def x_0(self, val):
"""Ensure `x_0` is not 0."""
if np.any(val == 0):
raise InputParameterError("0 is not an allowed value for x_0")
def bounding_box(self, factor=50):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
r"""One dimensional Plummer density profile model.
Parameters
----------
mass : float
Total mass of cluster.
r_plum : float
Scale parameter which sets the size of the cluster core.
Notes
-----
Model formula:
.. math::
\rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
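Examples
--------
A minimal usage sketch; the mass and scale radius are illustrative only::
import numpy as np
from astropy.modeling.models import Plummer1D
model = Plummer1D(mass=1.0e5, r_plum=5.0)
rho = model(np.linspace(0.1, 50, 100))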
"""
mass = Parameter(default=1.0, description="Total mass of cluster")
r_plum = Parameter(
default=1.0,
description="Scale parameter which sets the size of the cluster core",
)
@staticmethod
def evaluate(x, mass, r_plum):
"""
Evaluate plummer density profile model.
"""
return (
(3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2)
)
@staticmethod
def fit_deriv(x, mass, r_plum):
"""
Plummer1D model derivatives.
"""
d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2)))
d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / (
(4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2)
)
return [d_mass, d_r_plum]
@property
def input_units(self):
if self.mass.unit is None and self.r_plum.unit is None:
return None
else:
return {self.inputs[0]: self.r_plum.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3,
"r_plum": inputs_unit[self.inputs[0]],
}
class NFW(Fittable1DModel):
r"""
Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter.
Parameters
----------
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
massfactor : tuple or str
Mass overdensity factor and type for provided profiles:
Tuple version:
("virial",) : virial radius
("critical", N) : radius where density is N times that of the critical density
("mean", N) : radius where density is N times that of the mean density
String version:
"virial" : virial radius
"Nc" : radius where density is N times that of the critical density (e.g. "200c")
"Nm" : radius where density is N times that of the mean density (e.g. "500m")
cosmo : :class:`~astropy.cosmology.Cosmology`
Background cosmology for density calculation. If None, the default cosmology will be used.
Notes
-----
Model formula:
.. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2}
References
----------
.. [1] https://arxiv.org/pdf/astro-ph/9508025
.. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
.. [3] https://en.wikipedia.org/wiki/Virial_mass
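Examples
--------
A minimal sketch constructing a profile and evaluating the density at a
radius; the parameter values are illustrative only::
import astropy.units as u
from astropy.cosmology import Planck15
from astropy.modeling.models import NFW
nfw = NFW(mass=2.0e15 * u.M_sun, concentration=8.5, redshift=0.63, massfactor=("critical", 200), cosmo=Planck15)
density = nfw(3.0 * u.Mpc)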
"""
# Model Parameters
# NFW Profile mass
mass = Parameter(
default=1.0,
min=1.0,
unit=u.M_sun,
description="Peak mass within specified overdensity radius",
)
# NFW profile concentration
concentration = Parameter(default=1.0, min=1.0, description="Concentration")
# NFW Profile redshift
redshift = Parameter(default=0.0, min=0.0, description="Redshift")
# We allow values without units to be passed when evaluating the model, and
# in this case the input r values are assumed to be lengths / positions in kpc.
_input_units_allow_dimensionless = True
def __init__(
self,
mass=u.Quantity(mass.default, mass.unit),
concentration=concentration.default,
redshift=redshift.default,
massfactor=("critical", 200),
cosmo=None,
**kwargs,
):
# Set default cosmology
if cosmo is None:
# LOCAL
from astropy.cosmology import default_cosmology
cosmo = default_cosmology.get()
# Set mass overdensity type and factor
self._density_delta(massfactor, cosmo, redshift)
# Establish mass units for density calculation (default solar masses)
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Obtain scale radius
self._radius_s(mass, concentration)
# Obtain scale density
self._density_s(mass, concentration)
super().__init__(
mass=in_mass, concentration=concentration, redshift=redshift, **kwargs
)
def evaluate(self, r, mass, concentration, redshift):
"""
One dimensional NFW profile function.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of density to be calculated for the NFW profile.
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
Returns
-------
density : float or `~astropy.units.Quantity` ['density']
NFW profile mass density at location ``r``. The density units are:
[``mass`` / ``r`` ^3]
Notes
-----
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Create radial version of input with dimension
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Define reduced radius (r / r_{\\rm s})
# also update scale radius
radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
# Density distribution
# \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
# also update scale density
density = self._density_s(mass, concentration) / (
radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2
)
if hasattr(mass, "unit"):
return density
else:
return density.value
def _density_delta(self, massfactor, cosmo, redshift):
"""
Calculate density delta.
"""
# Set mass overdensity type and factor
if isinstance(massfactor, tuple):
# Tuple options
# ("virial") : virial radius
# ("critical", N) : radius where density is N that of the critical density
# ("mean", N) : radius where density is N that of the mean density
if massfactor[0].lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor[0].lower()
elif massfactor[0].lower() == "critical":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "c"
elif massfactor[0].lower() == "mean":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "m"
else:
raise ValueError(
f"Massfactor '{massfactor[0]}' not one of 'critical', "
"'mean', or 'virial'"
)
else:
try:
# String options
# virial : virial radius
# Nc : radius where density is N that of the critical density
# Nm : radius where density is N that of the mean density
if massfactor.lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor.lower()
elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m":
# Critical or Mean Overdensity Mass
delta = float(massfactor[0:-1])
masstype = massfactor[-1].lower()
else:
raise ValueError(
f"Massfactor {massfactor} string not of the form "
"'#m', '#c', or 'virial'"
)
except (AttributeError, TypeError):
raise TypeError(f"Massfactor {massfactor} not a tuple or string")
# Set density from masstype specification
if masstype == "virial":
Om_c = cosmo.Om(redshift) - 1.0
d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2
self.density_delta = d_c * cosmo.critical_density(redshift)
elif masstype == "c":
self.density_delta = delta * cosmo.critical_density(redshift)
elif masstype == "m":
self.density_delta = (
delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
)
return self.density_delta
@staticmethod
def A_NFW(y):
r"""
Dimensionless volume integral of the NFW profile, used as an intermediate step in some
calculations for this model.
Notes
-----
Model formula:
.. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
"""
return np.log(1.0 + y) - (y / (1.0 + y))
def _density_s(self, mass, concentration):
"""
Calculate scale density of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Calculate scale density
# M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
self.density_s = in_mass / (
4.0
* np.pi
* self._radius_s(in_mass, concentration) ** 3
* self.A_NFW(concentration)
)
return self.density_s
@property
def rho_scale(self):
r"""
Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`.
"""
return self.density_s
def _radius_s(self, mass, concentration):
"""
Calculate scale radius of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Delta Mass is related to delta radius by
# M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
# And delta radius is related to the NFW scale radius by
# c = R / r_{\\rm s}
self.radius_s = (
((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0)
) / concentration
# Set radial units to kiloparsec by default (unit will be rescaled by units of radius
# in evaluate)
return self.radius_s.to(u.kpc)
@property
def r_s(self):
"""
Scale radius of the NFW profile.
"""
return self.radius_s
@property
def r_virial(self):
"""
Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.).
"""
return self.r_s * self.concentration
@property
def r_max(self):
"""
Radius of maximum circular velocity.
"""
return self.r_s * 2.16258
@property
def v_max(self):
"""
Maximum circular velocity.
"""
return self.circular_velocity(self.r_max)
def circular_velocity(self, r):
r"""
Circular velocities of the NFW profile.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of velocity to be calculated for the NFW profile.
Returns
-------
velocity : float or `~astropy.units.Quantity` ['speed']
NFW profile circular velocity at location ``r``. The velocity units are:
[km / s]
Notes
-----
Model formula:
.. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
.. math:: x = r/r_s
.. warning::
Output values might contain ``nan`` and ``inf``.
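Examples
--------
A minimal sketch; the profile parameters and radii are illustrative only::
import astropy.units as u
from astropy.modeling.models import NFW
nfw = NFW(mass=2.0e15 * u.M_sun, concentration=8.5, redshift=0.63)
v = nfw.circular_velocity([50.0, 500.0, 5000.0] * u.kpc)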
"""
# Enforce default units (if parameters are without units)
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Mass factor defined velocity (i.e. V200c for M200c, Vvir for Mvir)
v_profile = np.sqrt(
self.mass
* const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2))
/ self.r_virial
)
# Define reduced radius (r / r_{\\rm s})
reduced_radius = in_r / self.r_virial.to(in_r.unit)
# Circular velocity given by:
# v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
# where x=r/r_{200}
velocity = np.sqrt(
(v_profile**2 * self.A_NFW(self.concentration * reduced_radius))
/ (reduced_radius * self.A_NFW(self.concentration))
)
return velocity.to(u.km / u.s)
@property
def input_units(self):
# The units for the 'r' variable should be a length (default kpc)
return {self.inputs[0]: u.kpc}
@property
def return_units(self):
# The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
if self.mass.unit is None:
return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
else:
return {
self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"mass": u.M_sun, "concentration": None, "redshift": None}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import ftplib
import functools
import hashlib
import io
import os
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.compat.optional_deps import HAS_FSSPEC
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
"Conf",
"conf",
"download_file",
"download_files_in_parallel",
"get_readable_fileobj",
"get_pkg_data_fileobj",
"get_pkg_data_filename",
"get_pkg_data_contents",
"get_pkg_data_fileobjs",
"get_pkg_data_filenames",
"get_pkg_data_path",
"is_url",
"is_url_in_cache",
"get_cached_urls",
"cache_total_size",
"cache_contents",
"export_download_cache",
"import_download_cache",
"import_file_to_cache",
"check_download_cache",
"clear_download_cache",
"compute_hash",
"get_free_space_in_dir",
"check_free_space_in_dir",
"get_file_contents",
"CacheMissingWarning",
"CacheDamaged",
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
"http://data.astropy.org/", "Primary URL for astropy remote data site."
)
dataurl_mirror = _config.ConfigItem(
"http://www.astropy.org/astropy-data/",
"Mirror URL for astropy remote data site.",
)
default_http_user_agent = _config.ConfigItem(
"astropy",
"Default User-Agent for HTTP request headers. This can be overwritten "
"for a particular call via http_headers option, where available. "
"This only provides the default value when not set by https_headers.",
)
remote_timeout = _config.ConfigItem(
10.0,
"Time to wait for remote data queries (in seconds).",
aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"],
)
allow_internet = _config.ConfigItem(
True, "If False, prevents any attempt to download from Internet."
)
compute_hash_block_size = _config.ConfigItem(
2**16, "Block size for computing file hashes." # 64K
)
download_block_size = _config.ConfigItem(
2**16, "Number of bytes of remote data to download per step." # 64K
)
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
"If True, temporary download files created when the cache is "
"inaccessible will be deleted at the end of the python session.",
)
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
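Examples
--------
A couple of simple checks; the results follow directly from the URL
scheme test performed by this function:
>>> from astropy.utils.data import is_url
>>> is_url('https://www.astropy.org')
True
>>> is_url('local/path/to/file.txt')
False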
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"]
# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url
def _requires_fsspec(url):
"""Does the `url` require the optional ``fsspec`` dependency to open?"""
return isinstance(url, str) and url.startswith(("s3://", "gs://"))
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(
os.path.abspath(parent_path)
) or os.path.realpath(path).startswith(os.path.realpath(parent_path))
@contextlib.contextmanager
def get_readable_fileobj(
name_or_obj,
encoding=None,
cache=False,
show_progress=True,
remote_timeout=None,
sources=None,
http_headers=None,
*,
use_fsspec=None,
fsspec_kwargs=None,
close_files=True,
):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
under this URL, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://``
or the Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g. ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
close_files : bool, optional
Close the file object when exiting the context manager.
Default is `True`.
.. versionadded:: 5.2
Returns
-------
file : readable file-like
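Examples
--------
A sketch of opening a remote object on Amazon S3 anonymously; the bucket
and key below are placeholders, and this path requires the optional
``fsspec`` and ``s3fs`` packages::
from astropy.utils.data import get_readable_fileobj
uri = "s3://some-public-bucket/some-key.fits"  # placeholder URI
with get_readable_fileobj(uri, encoding="binary", fsspec_kwargs={"anon": True}) as f:
    data = f.read()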
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Have `use_fsspec` default to ``True`` if the user passed an Amazon S3
# or Google Cloud Storage URI.
if use_fsspec is None and _requires_fsspec(name_or_obj):
use_fsspec = True
if use_fsspec:
if not isinstance(name_or_obj, str):
raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`")
if fsspec_kwargs is None:
fsspec_kwargs = {}
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
# Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage)
if use_fsspec:
if not HAS_FSSPEC:
raise ModuleNotFoundError("please install `fsspec` to open this file")
import fsspec # local import because it is a niche dependency
openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs)
close_fds.append(openfileobj)
fileobj = openfileobj.open()
close_fds.append(fileobj)
else:
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj,
cache=cache,
show_progress=show_progress,
timeout=remote_timeout,
sources=sources,
http_headers=http_headers,
)
fileobj = io.FileIO(name_or_obj, "r")
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
# BufferedReader to avoid reading loading the whole file first,
# but that might not be compatible with all possible I/O classes.
if not hasattr(fileobj, "seek"):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the legacy tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b"\x1f\x8b\x08": # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b"BZh": # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
try:
# bz2.BZ2File has historically not supported file objects (only
# filenames), so we write the data to a temporary file instead
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode="rb")
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b"\xfd7z": # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode="rb")
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module."
)
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != "binary"
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, "r")
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
if close_files:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
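Examples
--------
A minimal sketch; ``example.txt`` is a placeholder for any local file or
URL accepted by `get_readable_fileobj`::
from astropy.utils.data import get_file_contents
text = get_file_contents('example.txt', encoding='utf-8')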
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` method. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(
data_name, package=None, show_progress=True, remote_timeout=None
):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith("hash/"):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError(
"Tried to access a data file that's actually a package data directory"
)
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(
conf.dataurl + data_name,
cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name],
)
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(
data_name, package=package, encoding=encoding, cache=cache
) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern="*"):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually a package data file"
)
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
"""Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
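Examples
--------
An illustrative sketch; the file name and the digest shown are hypothetical::
>>> from astropy.utils.data import compute_hash
>>> compute_hash('local_file.dat')  # doctest: +SKIP
'34c33b3eb0d56eb9462003af249eff28'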
"""
with open(localfn, "rb") as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
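Examples
--------
An illustrative sketch; the resulting path shown is hypothetical::
>>> from astropy.utils.data import get_pkg_data_path
>>> get_pkg_data_path('data', '3d_cd.hdr', package='astropy.wcs.tests')  # doctest: +SKIP
'/path/to/astropy/wcs/tests/data/3d_cd.hdr'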
"""
if package is None:
module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, "__package__") or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if "." in module.__name__:
package = module.__name__.rpartition(".")[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# package errors if it isn't a str
# so there is no need for checks in the containing if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition(".")[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(
f"attempted to get a local data file outside of the {rootpkgname} tree."
)
return full_path
def _find_hash_fn(hexdigest, pkgname="astropy"):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as a plain integer (in bytes).
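Examples
--------
An illustrative sketch; the numbers returned are of course machine-dependent::
>>> from astropy.utils.data import get_free_space_in_dir
>>> get_free_space_in_dir('.')  # doctest: +SKIP
434580963328
>>> get_free_space_in_dir('.', unit=True)  # doctest: +SKIP
<Quantity 4.34580963e+11 byte>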
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, not files."
)
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
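Examples
--------
A short sketch of typical use; the directory and size are arbitrary::
>>> from astropy.utils.data import check_free_space_in_dir
>>> check_free_space_in_dir('.', 200 * 1024 ** 2)  # 200 MiB, in bytes  # doctest: +SKIP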
"""
space = get_free_space_in_dir(path, unit=getattr(size, "unit", False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(
f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left"
)
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper for building a `urllib.request.build_opener` which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if "certfile" in ssl_context:
cert_chain.update(
{
"certfile": ssl_context.pop("certfile"),
"keyfile": ssl_context.pop("keyfile", None),
"password": ssl_context.pop("password", None),
}
)
elif "password" in ssl_context or "keyfile" in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well"
)
if "cafile" not in ssl_context and certifi is not None:
ssl_context["cafile"] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(
source_url,
timeout=None,
http_headers=None,
ftp_tls=False,
ssl_context=None,
allow_insecure=False,
):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
# converted to a hashable type (a frozenset of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False
)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (
isinstance(reason, ssl.SSLError)
and reason.reason == "CERTIFICATE_VERIFY_FAILED"
):
msg = (
f"Verification of TLS/SSL certificate at {source_url} "
"failed: this can mean either the server is "
"misconfigured or your local root CA certificates are "
"out-of-date; in the latter case this can usually be "
'addressed by installing the Python package "certifi" '
"(see the documentation for astropy.utils.data.download_url)"
)
if not allow_insecure:
msg += (
" or in both cases you can work around this by "
"passing allow_insecure=True, but only if you "
"understand the implications; the original error "
f"was: {reason}"
)
raise urllib.error.URLError(msg)
else:
msg += ". Re-trying with allow_insecure=True."
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(
ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True
)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(
source_url,
show_progress=True,
timeout=None,
remote_url=None,
cache=False,
pkgname="astropy",
http_headers=None,
ftp_tls=None,
ssl_context=None,
allow_insecure=False,
):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
"if this is unexpected check the astropy.cfg file for the option "
"allow_internet"
)
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(
source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False,
)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
# URLError changed to report original exception in Python 3.10, 3.11 (bpo-43564)
# str.lstrip() would strip a *set of characters*, not a prefix, so use
# removeprefix() to drop any leading "ftp error: " marker
if str(e.reason).removeprefix("ftp error: ").startswith(("error_perm", "5")):
ftp_tls = True
else:
raise
with _try_url_open(
source_url,
timeout=timeout,
http_headers=http_headers,
ftp_tls=ftp_tls,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
) as remote:
info = remote.info()
try:
size = int(info["Content-Length"])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(
prefix=f"astropy-download-{os.getpid()}-", delete=False
) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
"bytes. Download failed."
)
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None,
)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``remote_url``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading the `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
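Examples
--------
A minimal sketch; the URL below is hypothetical and network access is
assumed::
>>> from astropy.utils.data import download_file
>>> path = download_file('https://example.com/catalog.fits', cache=True)  # doctest: +SKIP
>>> with open(path, 'rb') as f:  # doctest: +SKIP
...     first_bytes = f.read(80)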
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
def is_url_in_cache(url_key, pkgname="astropy"):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
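Examples
--------
A short sketch; the URL below is hypothetical::
>>> from astropy.utils.data import download_file, is_url_in_cache
>>> url = 'https://example.com/catalog.fits'
>>> is_url_in_cache(url)  # doctest: +SKIP
False
>>> path = download_file(url, cache=True)  # doctest: +SKIP
>>> is_url_in_cache(url)  # doctest: +SKIP
True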
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname="astropy"):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(
urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname="astropy",
):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
Whether to use the cache (default is ``"update"``). If ``"update"``,
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
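Examples
--------
A hedged sketch; the URLs below are hypothetical and network access is
assumed::
>>> from astropy.utils.data import download_files_in_parallel
>>> urls = ['https://example.com/a.fits', 'https://example.com/b.fits']
>>> paths = download_files_in_parallel(urls)  # doctest: +SKIP
>>> len(paths) == len(urls)  # doctest: +SKIP
True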
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with update_cache if appropriate.
warn(
"Disabling the cache does not work because of multiprocessing, "
'it will be set to ``"update"``. You may need to manually remove '
"the cached files with clear_download_cache() afterwards.",
AstropyWarning,
)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[
dict(
remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path,
)
for u in combined_urls
],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname="astropy"):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
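Examples
--------
A short sketch; the URL below is hypothetical::
>>> from astropy.utils.data import clear_download_cache
>>> clear_download_cache('https://example.com/catalog.fits')  # doctest: +SKIP
>>> clear_download_cache()  # remove every cached file  # doctest: +SKIP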
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = "Not clearing data cache - cache inaccessible due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}"
)
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match(
r"[0-9a-f]+", hashorurl
):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = "Not clearing data from cache - problem arose "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
def _get_download_cache_loc(pkgname="astropy"):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(
astropy.config.paths.get_cache_dir(pkgname), "download", "url"
)
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f"Data cache directory {datadir} is not a directory")
return datadir
except OSError as e:
msg = "Remote data cache could not be accessed due to "
estr = "" if len(e.args) < 1 else (": " + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "":
urlobj[2] = "/"
url_c = urllib.parse.urlunsplit(urlobj)
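# At this point, e.g., "HTTP://Example.COM" and "http://example.com/" have both
# been normalized to "http://example.com/" (urlsplit lower-cases the scheme, the
# netloc is lower-cased explicitly, and a bare host gains a "/" path), so they
# hash to the same cache directory name.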
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
Using clear_download_cache on the .bad_file or .bad_url attribute,
whichever is not None, should resolve this particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including cache entries whose
stored URL is missing or malformed, entries stored under a directory whose
name does not match the hash of their URL, entries missing their
``contents`` file, and stray files or directories left over in the cache
directory. Problems are reported by raising `CacheDamaged`, whose
``bad_files`` attribute lists the offending paths. These can be removed
with :func:`clear_download_cache`, either by passing each such path, or
with no arguments to empty the entire cache and return it to a
reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
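Examples
--------
A sketch of how the raised exception can be used to repair the cache::
>>> from astropy.utils.data import (
...     CacheDamaged, check_download_cache, clear_download_cache)
>>> try:  # doctest: +SKIP
...     check_download_cache()
... except CacheDamaged as exc:
...     for path in exc.bad_files:
...         clear_download_cache(path)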
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager.
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
be temporarily or permanently unavailable). It is using ``url_key`` that
users will request these contents from the cache. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
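Examples
--------
A minimal sketch; the URL and local file below are hypothetical::
>>> from astropy.utils.data import download_file, import_file_to_cache
>>> url = 'https://example.com/catalog.fits'
>>> import_file_to_cache(url, 'local_copy.fits')  # doctest: +SKIP
>>> path = download_file(url, cache=True, sources=[])  # served from the cache  # doctest: +SKIP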
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
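Examples
--------
A short sketch; the listed URLs are hypothetical::
>>> from astropy.utils.data import get_cached_urls
>>> get_cached_urls()  # doctest: +SKIP
['https://example.com/a.fits', 'https://example.com/b.fits']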
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
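Examples
--------
A short sketch of typical use::
>>> from astropy.utils.data import cache_contents
>>> mapping = cache_contents()  # doctest: +SKIP
>>> sorted(mapping.keys())  # the cached URLs  # doctest: +SKIP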
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
# entry.is_dir is a method; call it so stray non-directory entries are skipped
if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
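Examples
--------
A minimal sketch; the output file name is arbitrary::
>>> from astropy.utils.data import export_download_cache
>>> export_download_cache('cache_backup.zip', overwrite=True)  # doctest: +SKIP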
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
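Examples
--------
A minimal sketch; the input file name is arbitrary::
>>> from astropy.utils.data import import_download_cache
>>> import_download_cache('cache_backup.zip', update_cache=True)  # doctest: +SKIP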
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
|
3c36de567646b0ece1811bb4a695ab4ab0b6fee10de3d542f0bce1307cabd155 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import numpy as np
from astropy import units as u
from astropy.time import Time
__all__ = ["time_support"]
__doctest_requires__ = {"time_support": ["matplotlib"]}
UNSUPPORTED_FORMATS = ("datetime", "datetime64")
YMDHMS_FORMATS = ("fits", "iso", "isot", "yday")
STR_FORMATS = YMDHMS_FORMATS + ("byear_str", "jyear_str")
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> from astropy.time import Time
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
# Note: we default to AutoLocator since many time formats
# can just use this.
def __init__(self, converter, *args, **kwargs):
kwargs["nbins"] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (
self._converter.format != "yday" and vrange > 31
) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(
vmin, scale=self._converter.scale, format="mjd"
).datetime
tmax = Time(
vmax, scale=self._converter.scale, format="mjd"
).datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
times.append(
datetime(
year=ymin + (month - 1) // 12,
month=(month - 1) % 12 + 1,
day=1,
)
)
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format="mjd", scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ("fits", "iso", "isot"):
if all([x.endswith("00:00:00.000") for x in formatted]):
split = " " if self._converter.format == "iso" else "T"
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == "yday":
if all([x.endswith(":001:00:00:00.000") for x in formatted]):
formatted = [x.split(":", 1)[0] for x in formatted]
return formatted
elif self._converter.format == "byear_str":
return Time(
values, format="byear", scale=self._converter.scale
).byear_str
elif self._converter.format == "jyear_str":
return Time(
values, format="jyear", scale=self._converter.scale
).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError(f"time_support does not support format={value}")
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
return "astropy_time"
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
scaled = getattr(value, self.scale)
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == "byear_str":
return scaled.byear
elif self.format == "jyear_str":
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(
majfmt=majfmt, majloc=majloc, label=f"Time ({self.scale})"
)
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
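# Editor's note: a minimal usage sketch (not part of this module) showing the
# converter above registered through the ``time_support`` context manager; the
# ``format``/``scale``/``simplify`` values below are illustrative choices only.
#
#     >>> import matplotlib.pyplot as plt
#     >>> from astropy.time import Time
#     >>> from astropy.visualization import time_support
#     >>> with time_support(format="mjd", scale="utc", simplify=True):
#     ...     fig, ax = plt.subplots()
#     ...     ax.plot(Time(["2016-03-22T12:30:31", "2016-03-22T12:30:38"]), [1, 2])
#     ...     fig.savefig("time_axis.png")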
|
008f395e9c60318c937dd6bcdbf39f8b476c30c38a117933ff7b0f236f864906 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import weakref
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
# from astropy.utils.compat import ignored
from astropy import log
from astropy.units import Quantity, Unit, UnitConversionError
__all__ = [
"MissingDataAssociationException",
"IncompatibleUncertaintiesException",
"NDUncertainty",
"StdDevUncertainty",
"UnknownUncertainty",
"VarianceUncertainty",
"InverseVariance",
]
class IncompatibleUncertaintiesException(Exception):
"""This exception should be used to indicate cases in which uncertainties
with two different classes can not be propagated.
"""
class MissingDataAssociationException(Exception):
"""This exception should be used to indicate that an uncertainty instance
has not been associated with a parent `~astropy.nddata.NDData` object.
"""
class NDUncertainty(metaclass=ABCMeta):
"""This is the metaclass for uncertainty classes used with `NDData`.
Parameters
----------
array : any type, optional
The array or value (the parameter name is due to historical reasons) of
the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or
`NDUncertainty` subclasses are recommended.
If the `array` is `list`-like or `numpy.ndarray`-like it will be cast
to a plain `numpy.ndarray`.
Default is ``None``.
unit : unit-like, optional
Unit for the uncertainty ``array``. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the `array` as a copy. ``True`` copies it
before saving, while ``False`` tries to save every parameter as
reference. Note however that it is not always possible to save the
input as reference.
Default is ``True``.
Raises
------
IncompatibleUncertaintiesException
If given another `NDUncertainty`-like class as ``array`` whose
``uncertainty_type`` is different.
"""
def __init__(self, array=None, copy=True, unit=None):
if isinstance(array, NDUncertainty):
# Given an NDUncertainty class or subclass check that the type
# is the same.
if array.uncertainty_type != self.uncertainty_type:
raise IncompatibleUncertaintiesException
# Check if two units are given and take the explicit one then.
if unit is not None and unit != array._unit:
# TODO : Clarify it (see NDData.init for same problem)?
log.info("overwriting Uncertainty's current unit with specified unit.")
elif array._unit is not None:
unit = array.unit
array = array.array
elif isinstance(array, Quantity):
# Check if two units are given and take the explicit one then.
if unit is not None and array.unit is not None and unit != array.unit:
log.info("overwriting Quantity's current unit with specified unit.")
elif array.unit is not None:
unit = array.unit
array = array.value
if unit is None:
self._unit = None
else:
self._unit = Unit(unit)
if copy:
array = deepcopy(array)
unit = deepcopy(unit)
self.array = array
self.parent_nddata = None # no associated NDData - until it is set!
@property
@abstractmethod
def uncertainty_type(self):
"""`str` : Short description of the type of uncertainty.
Defined as abstract property so subclasses *have* to override this.
"""
return None
@property
def supports_correlated(self):
"""`bool` : Supports uncertainty propagation with correlated uncertainties?
.. versionadded:: 1.2
"""
return False
@property
def array(self):
"""`numpy.ndarray` : the uncertainty's value."""
return self._array
@array.setter
def array(self, value):
if isinstance(value, (list, np.ndarray)):
value = np.array(value, subok=False, copy=False)
self._array = value
@property
def unit(self):
"""`~astropy.units.Unit` : The unit of the uncertainty, if any."""
return self._unit
@unit.setter
def unit(self, value):
"""
The unit should be set to a value consistent with the parent NDData
unit and the uncertainty type.
"""
if value is not None:
# Check the hidden attribute below, not the property. The property
# raises an exception if there is no parent_nddata.
if self._parent_nddata is not None:
parent_unit = self.parent_nddata.unit
try:
# Check for consistency with the unit of the parent_nddata
self._data_unit_to_uncertainty_unit(parent_unit).to(value)
except UnitConversionError:
raise UnitConversionError(
"Unit {} is incompatible with unit {} of parent nddata".format(
value, parent_unit
)
)
self._unit = Unit(value)
else:
self._unit = value
@property
def quantity(self):
"""
This uncertainty as an `~astropy.units.Quantity` object.
"""
return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype)
@property
def parent_nddata(self):
"""`NDData` : reference to `NDData` instance with this uncertainty.
In case the reference is not set, uncertainty propagation will not be
possible since propagation might need the uncertain data besides the
uncertainty.
"""
no_parent_message = "uncertainty is not associated with an NDData object"
parent_lost_message = (
"the associated NDData object was deleted and cannot be accessed "
"anymore. You can prevent the NDData object from being deleted by "
"assigning it to a variable. If this happened after unpickling "
"make sure you pickle the parent not the uncertainty directly."
)
try:
parent = self._parent_nddata
except AttributeError:
raise MissingDataAssociationException(no_parent_message)
else:
if parent is None:
raise MissingDataAssociationException(no_parent_message)
else:
# The NDData is saved as weak reference so we must call it
# to get the object the reference points to. However because
# we have a weak reference here it's possible that the parent
# was deleted because its reference count dropped to zero.
if isinstance(self._parent_nddata, weakref.ref):
resolved_parent = self._parent_nddata()
if resolved_parent is None:
log.info(parent_lost_message)
return resolved_parent
else:
log.info("parent_nddata should be a weakref to an NDData object.")
return self._parent_nddata
@parent_nddata.setter
def parent_nddata(self, value):
if value is not None and not isinstance(value, weakref.ref):
# Save a weak reference on the uncertainty that points to this
# instance of NDData. Direct references should NOT be used:
# https://github.com/astropy/astropy/pull/4799#discussion_r61236832
value = weakref.ref(value)
# Set _parent_nddata here and access below with the property because value
# is a weakref
self._parent_nddata = value
# set uncertainty unit to that of the parent if it was not already set, unless initializing
# with an empty parent (value=None)
if value is not None:
parent_unit = self.parent_nddata.unit
if self.unit is None:
if parent_unit is None:
self.unit = None
else:
# Set the uncertainty's unit to the appropriate value
self.unit = self._data_unit_to_uncertainty_unit(parent_unit)
else:
# Check that units of uncertainty are compatible with those of
# the parent. If they are, no need to change units of the
# uncertainty or the data. If they are not, let the user know.
unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit)
try:
unit_from_data.to(self.unit)
except UnitConversionError:
raise UnitConversionError(
"Unit {} of uncertainty "
"incompatible with unit {} of "
"data".format(self.unit, parent_unit)
)
@abstractmethod
def _data_unit_to_uncertainty_unit(self, value):
"""
Subclasses must override this property. It should take in a data unit
and return the correct unit for the uncertainty given the uncertainty
type.
"""
return None
def __repr__(self):
prefix = self.__class__.__name__ + "("
try:
body = np.array2string(self.array, separator=", ", prefix=prefix)
except AttributeError:
# In case it wasn't possible to use array2string
body = str(self.array)
return "".join([prefix, body, ")"])
def __getstate__(self):
# Because of the weak reference the class wouldn't be picklable.
try:
return self._array, self._unit, self.parent_nddata
except MissingDataAssociationException:
# In case there's no parent
return self._array, self._unit, None
def __setstate__(self, state):
if len(state) != 3:
raise TypeError("The state should contain 3 items.")
self._array = state[0]
self._unit = state[1]
parent = state[2]
if parent is not None:
parent = weakref.ref(parent)
self._parent_nddata = parent
def __getitem__(self, item):
"""Normal slicing on the array, keep the unit and return a reference."""
return self.__class__(self.array[item], unit=self.unit, copy=False)
def propagate(self, operation, other_nddata, result_data, correlation):
"""Calculate the resulting uncertainty given an operation on the data.
.. versionadded:: 1.2
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide` (or `numpy.divide`).
other_nddata : `NDData` instance
The second operand in the arithmetic operation.
result_data : `~astropy.units.Quantity` or ndarray
The result of the arithmetic operations on the data.
correlation : `numpy.ndarray` or number
The correlation (rho) is defined between the uncertainties in
sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means
uncorrelated operands.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
Another instance of the same `NDUncertainty` subclass containing
the uncertainty of the result.
Raises
------
ValueError
If the ``operation`` is not supported or if correlation is not zero
but the subclass does not support correlated uncertainties.
Notes
-----
First this method checks if a correlation is given and the subclass
implements propagation with correlated uncertainties.
Then the second uncertainty is converted (or an Exception is raised)
to the same class in order to do the propagation.
Then the appropriate propagation method is invoked and the result is
returned.
"""
# Check if the subclass supports correlation
if not self.supports_correlated:
if isinstance(correlation, np.ndarray) or correlation != 0:
raise ValueError(
"{} does not support uncertainty propagation"
" with correlation."
"".format(self.__class__.__name__)
)
# Get the other uncertainty (and convert it to a matching one)
other_uncert = self._convert_uncertainty(other_nddata.uncertainty)
if operation.__name__ == "add":
result = self._propagate_add(other_uncert, result_data, correlation)
elif operation.__name__ == "subtract":
result = self._propagate_subtract(other_uncert, result_data, correlation)
elif operation.__name__ == "multiply":
result = self._propagate_multiply(other_uncert, result_data, correlation)
elif operation.__name__ in ["true_divide", "divide"]:
result = self._propagate_divide(other_uncert, result_data, correlation)
else:
raise ValueError("unsupported operation")
return self.__class__(result, copy=False)
def _convert_uncertainty(self, other_uncert):
"""Checks if the uncertainties are compatible for propagation.
Checks if the other uncertainty is `NDUncertainty`-like and if so
verify that the uncertainty_type is equal. If the latter is not the
case try returning ``self.__class__(other_uncert)``.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The other uncertainty.
Returns
-------
other_uncert : `NDUncertainty` subclass
but converted to a compatible `NDUncertainty` subclass if
possible and necessary.
Raises
------
IncompatibleUncertaintiesException:
If the other uncertainty cannot be converted to a compatible
`NDUncertainty` subclass.
"""
if isinstance(other_uncert, NDUncertainty):
if self.uncertainty_type == other_uncert.uncertainty_type:
return other_uncert
else:
return self.__class__(other_uncert)
else:
raise IncompatibleUncertaintiesException
@abstractmethod
def _propagate_add(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
@abstractmethod
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
def represent_as(self, other_uncert):
"""Convert this uncertainty to a different uncertainty type.
Parameters
----------
other_uncert : `NDUncertainty` subclass
The `NDUncertainty` subclass to convert to.
Returns
-------
resulting_uncertainty : `NDUncertainty` instance
An instance of ``other_uncert`` subclass containing the uncertainty
converted to the new uncertainty type.
Raises
------
TypeError
If either the initial or final subclasses do not support
conversion, a `TypeError` is raised.
"""
as_variance = getattr(self, "_convert_to_variance", None)
if as_variance is None:
raise TypeError(
f"{type(self)} does not support conversion to another uncertainty type."
)
from_variance = getattr(other_uncert, "_convert_from_variance", None)
if from_variance is None:
raise TypeError(
f"{other_uncert.__name__} does not support conversion from "
"another uncertainty type."
)
return from_variance(as_variance())
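# Editor's note: an illustrative sketch (not part of astropy) of the
# ``represent_as`` conversion defined above, using two of the concrete
# uncertainty classes declared later in this module.
#
#     >>> from astropy.nddata import StdDevUncertainty, VarianceUncertainty
#     >>> std = StdDevUncertainty([0.1, 0.2])
#     >>> std.represent_as(VarianceUncertainty)  # doctest: +FLOAT_CMP
#     VarianceUncertainty([0.01, 0.04])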
class UnknownUncertainty(NDUncertainty):
"""This class implements any unknown uncertainty type.
The main purpose of having an unknown uncertainty class is to prevent
uncertainty propagation.
Parameters
----------
args, kwargs :
see `NDUncertainty`
"""
@property
def supports_correlated(self):
"""`False` : Uncertainty propagation is *not* possible for this class."""
return False
@property
def uncertainty_type(self):
"""``"unknown"`` : `UnknownUncertainty` implements any unknown \
uncertainty type.
"""
return "unknown"
def _data_unit_to_uncertainty_unit(self, value):
"""
No way to convert if uncertainty is unknown.
"""
return None
def _convert_uncertainty(self, other_uncert):
"""Raise an Exception because unknown uncertainty types cannot
implement propagation.
"""
msg = "Uncertainties of unknown type cannot be propagated."
raise IncompatibleUncertaintiesException(msg)
def _propagate_add(self, other_uncert, result_data, correlation):
"""Not possible for unknown uncertainty types."""
return None
def _propagate_subtract(self, other_uncert, result_data, correlation):
return None
def _propagate_multiply(self, other_uncert, result_data, correlation):
return None
def _propagate_divide(self, other_uncert, result_data, correlation):
return None
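# Editor's note: illustrative sketch (not part of astropy). An uncertainty
# without an ``uncertainty_type`` attribute that is attached to an ``NDData``
# gets wrapped in ``UnknownUncertainty``, which blocks error propagation.
#
#     >>> from astropy.nddata import NDData
#     >>> ndd = NDData([1.0, 2.0, 3.0], uncertainty=[0.1, 0.1, 0.1])
#     INFO: uncertainty should have attribute uncertainty_type. [astropy.nddata.nddata]
#     >>> type(ndd.uncertainty).__name__
#     'UnknownUncertainty'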
class _VariancePropagationMixin:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_add_sub(
self,
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for any uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit**2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if other_uncert.unit is not None and result_unit_sq != to_variance(
other_uncert.unit
):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result, convert it to the result's unit
other = (
to_variance(other_uncert.array << other_uncert.unit)
.to(result_unit_sq)
.value
)
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the result's unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
# Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
# Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
# (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(
self,
other_uncert,
result_data,
correlation,
divide=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for multiplication or division of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for any uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
divide : bool, optional
If ``True``, propagate for division, otherwise propagate for
multiplication.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
# For multiplication we don't need the result as quantity
if isinstance(result_data, Quantity):
result_data = result_data.value
if divide:
correlation_sign = -1
else:
correlation_sign = 1
if other_uncert.array is not None:
# We want the result to have a unit consistent with the parent, so
# we only need to convert the unit of the other uncertainty if it
# is different from its data's unit.
if (
other_uncert.unit
and to_variance(1 * other_uncert.unit)
!= ((1 * other_uncert.parent_nddata.unit) ** 2).unit
):
d_b = (
to_variance(other_uncert.array << other_uncert.unit)
.to((1 * other_uncert.parent_nddata.unit) ** 2)
.value
)
else:
d_b = to_variance(other_uncert.array)
# Formula: sigma**2 = |A|**2 * d_b
right = np.abs(self.parent_nddata.data**2 * d_b)
else:
right = 0
if self.array is not None:
# Just the reversed case
if (
self.unit
and to_variance(1 * self.unit)
!= ((1 * self.parent_nddata.unit) ** 2).unit
):
d_a = (
to_variance(self.array << self.unit)
.to((1 * self.parent_nddata.unit) ** 2)
.value
)
else:
d_a = to_variance(self.array)
# Formula: sigma**2 = |B|**2 * d_a
left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
else:
left = 0
# Multiplication
#
# The fundamental formula is:
# sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# This formula is not very handy since it generates NaNs for every
# zero in A and B. So we rewrite it:
#
# Multiplication Formula:
# sigma**2 = d_a*B**2 + d_b*A**2 + 2 * cor * A * B * sqrt(d_a * d_b)
# sigma**2 = left + right + 2 * cor * A * B * sqrt(d_a * d_b)
#
# Division
#
# The fundamental formula for division is:
# sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
#
# As with multiplication, it is convenient to rewrite this to avoid
# nans where A is zero.
#
# Division formula (rewritten):
# sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
#     - 2 * cor * A * sqrt(d_a * d_b) / B**3
# sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
#     - 2 * cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
# sigma**2 = multiplication formula/B**4 (and sign change in
# the correlation)
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = (
2
* correlation
* np.sqrt(d_a * d_b)
* self.parent_nddata.data
* other_uncert.parent_nddata.data
)
else:
corr = 0
if divide:
return from_variance(
(left + right + correlation_sign * corr)
/ other_uncert.parent_nddata.data**4
)
else:
return from_variance(left + right + correlation_sign * corr)
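# Editor's note: a small worked check (not part of astropy) of the addition
# formula used in the mixin above, sigma**2 = dA + dB + 2*cor*sqrt(dA*dB).
# With variances dA = 0.04, dB = 0.09 and correlation cor = 0.5:
# 0.04 + 0.09 + 2*0.5*sqrt(0.0036) = 0.13 + 0.06 = 0.19.
#
#     >>> import numpy as np
#     >>> dA, dB, cor = 0.04, 0.09, 0.5
#     >>> dA + dB + 2 * cor * np.sqrt(dA * dB)  # doctest: +FLOAT_CMP
#     0.19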
class StdDevUncertainty(_VariancePropagationMixin, NDUncertainty):
"""Standard deviation uncertainty assuming first order gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `StdDevUncertainty`. The class can handle cases in which the uncertainty
has a unit that differs from (but is convertible to) the unit of the parent
`NDData`. The resulting uncertainty will have the same unit as the resulting
data. Correlation is also supported, but the correlation must be given as
input; this class cannot determine the correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
`StdDevUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, StdDevUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=StdDevUncertainty([0.1, 0.1, 0.1]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.1, 0.1, 0.1])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = StdDevUncertainty([0.2], unit='m', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
StdDevUncertainty([0.2])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 2
>>> ndd.uncertainty
StdDevUncertainty(2)
.. note::
The unit will not be displayed.
"""
@property
def supports_correlated(self):
"""`True` : `StdDevUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
@property
def uncertainty_type(self):
"""``"std"`` : `StdDevUncertainty` implements standard deviation."""
return "std"
def _convert_uncertainty(self, other_uncert):
if isinstance(other_uncert, StdDevUncertainty):
return other_uncert
else:
raise IncompatibleUncertaintiesException
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=np.square,
from_variance=np.sqrt,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=np.square,
from_variance=np.sqrt,
)
def _data_unit_to_uncertainty_unit(self, value):
return value
def _convert_to_variance(self):
new_array = None if self.array is None else self.array**2
new_unit = None if self.unit is None else self.unit**2
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else var_uncert.array ** (1 / 2)
new_unit = None if var_uncert.unit is None else var_uncert.unit ** (1 / 2)
return cls(new_array, unit=new_unit)
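# Editor's note: illustrative sketch (not part of astropy) of calling
# ``propagate`` directly for an addition; standard deviations add in
# quadrature, e.g. sqrt(0.1**2 + 0.3**2) ~= 0.316.
#
#     >>> import numpy as np
#     >>> from astropy.nddata import NDData, StdDevUncertainty
#     >>> a = NDData([1.0, 2.0], uncertainty=StdDevUncertainty([0.1, 0.2]))
#     >>> b = NDData([3.0, 4.0], uncertainty=StdDevUncertainty([0.3, 0.4]))
#     >>> a.uncertainty.propagate(np.add, b, a.data + b.data, 0)  # doctest: +FLOAT_CMP
#     StdDevUncertainty([0.31622777, 0.4472136 ])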
class VarianceUncertainty(_VariancePropagationMixin, NDUncertainty):
"""
Variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `VarianceUncertainty`. The class can handle cases in which the uncertainty
has a unit that differs from (but is convertible to) the unit of the parent
`NDData`. The unit of the resulting uncertainty will be the square of the
unit of the resulting data. Correlation is also supported, but the
correlation must be given as input; this class cannot determine the
correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`VarianceUncertainty` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, VarianceUncertainty
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=VarianceUncertainty([0.01, 0.01, 0.01]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.01, 0.01, 0.01])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = VarianceUncertainty([0.04], unit='m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
VarianceUncertainty([0.04])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 4
>>> ndd.uncertainty
VarianceUncertainty(4)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"var"`` : `VarianceUncertainty` implements variance."""
return "var"
@property
def supports_correlated(self):
"""`True` : `VarianceUncertainty` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=False
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert, result_data, correlation, subtract=True
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=False
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert, result_data, correlation, divide=True
)
def _data_unit_to_uncertainty_unit(self, value):
return value**2
def _convert_to_variance(self):
return self
@classmethod
def _convert_from_variance(cls, var_uncert):
return var_uncert
def _inverse(x):
"""Just a simple inverse for use in the InverseVariance."""
return 1 / x
class InverseVariance(_VariancePropagationMixin, NDUncertainty):
"""
Inverse variance uncertainty assuming first order Gaussian error
propagation.
This class implements uncertainty propagation for ``addition``,
``subtraction``, ``multiplication`` and ``division`` with other instances
of `InverseVariance`. The class can handle cases in which the uncertainty has
a unit that differs from (but is convertible to) the unit of the parent
`NDData`. The unit of the resulting uncertainty will be the inverse square of
the unit of the resulting data. Correlation is also supported, but the
correlation must be given as input; this class cannot determine the
correlation itself.
Parameters
----------
args, kwargs :
see `NDUncertainty`
Examples
--------
Compare this example to that in `StdDevUncertainty`; the uncertainties
in the examples below are equivalent to the uncertainties in
`StdDevUncertainty`.
`InverseVariance` should always be associated with an `NDData`-like
instance, either by creating it during initialization::
>>> from astropy.nddata import NDData, InverseVariance
>>> ndd = NDData([1,2,3], unit='m',
... uncertainty=InverseVariance([100, 100, 100]))
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([100, 100, 100])
or by setting it manually on the `NDData` instance::
>>> ndd.uncertainty = InverseVariance([25], unit='1/m^2', copy=True)
>>> ndd.uncertainty # doctest: +FLOAT_CMP
InverseVariance([25])
the uncertainty ``array`` can also be set directly::
>>> ndd.uncertainty.array = 0.25
>>> ndd.uncertainty
InverseVariance(0.25)
.. note::
The unit will not be displayed.
"""
@property
def uncertainty_type(self):
"""``"ivar"`` : `InverseVariance` implements inverse variance."""
return "ivar"
@property
def supports_correlated(self):
"""`True` : `InverseVariance` allows to propagate correlated \
uncertainties.
``correlation`` must be given, this class does not implement computing
it by itself.
"""
return True
def _propagate_add(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_subtract(self, other_uncert, result_data, correlation):
return super()._propagate_add_sub(
other_uncert,
result_data,
correlation,
subtract=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_multiply(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=False,
to_variance=_inverse,
from_variance=_inverse,
)
def _propagate_divide(self, other_uncert, result_data, correlation):
return super()._propagate_multiply_divide(
other_uncert,
result_data,
correlation,
divide=True,
to_variance=_inverse,
from_variance=_inverse,
)
def _data_unit_to_uncertainty_unit(self, value):
return 1 / value**2
def _convert_to_variance(self):
new_array = None if self.array is None else 1 / self.array
new_unit = None if self.unit is None else 1 / self.unit
return VarianceUncertainty(new_array, unit=new_unit)
@classmethod
def _convert_from_variance(cls, var_uncert):
new_array = None if var_uncert.array is None else 1 / var_uncert.array
new_unit = None if var_uncert.unit is None else 1 / var_uncert.unit
return cls(new_array, unit=new_unit)
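# Editor's note: illustrative sketch (not part of astropy) showing how the
# ``_data_unit_to_uncertainty_unit`` hook above fixes the uncertainty unit
# from the parent's data unit when no unit is given explicitly.
#
#     >>> from astropy.nddata import NDData, InverseVariance
#     >>> ndd = NDData([1.0, 2.0], unit="m", uncertainty=InverseVariance([100.0, 25.0]))
#     >>> ndd.uncertainty.unit
#     Unit("1 / m2")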
|
006db280d9a4dc07616fc2953d70550bfc8a474b316503c65379900fb126158b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from copy import deepcopy
import numpy as np
from astropy import log
from astropy.units import Quantity, Unit
from astropy.utils.metadata import MetaData
from astropy.wcs.wcsapi import SlicedLowLevelWCS # noqa: F401
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS, HighLevelWCSWrapper
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
__all__ = ["NDData"]
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
"""
A container for `numpy.ndarray`-based datasets, using the
`~astropy.nddata.NDDataBase` interface.
The key distinction from raw `numpy.ndarray` is the presence of
additional metadata such as uncertainty, mask, unit, a coordinate system
and/or a dictionary containing further meta information. This class *only*
provides a container for *storing* such datasets. For further functionality
take a look at the ``See also`` section.
See also: https://docs.astropy.org/en/stable/nddata/
Parameters
----------
data : `numpy.ndarray`-like or `NDData`-like
The dataset.
uncertainty : any type, optional
Uncertainty in the dataset.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, for example ``"std"`` for standard deviation or
``"var"`` for variance. A metaclass defining such an interface is
`NDUncertainty` - but isn't mandatory. If the uncertainty has no such
attribute the uncertainty is stored as `UnknownUncertainty`.
Defaults to ``None``.
mask : any type, optional
Mask for the dataset. Masks should follow the ``numpy`` convention that
**valid** data points are marked by ``False`` and **invalid** ones with
``True``.
Defaults to ``None``.
wcs : any type, optional
World coordinate system (WCS) for the dataset.
Default is ``None``.
meta : `dict`-like object, optional
Additional meta information about the dataset. If no meta is provided
an empty `collections.OrderedDict` is created.
Default is ``None``.
unit : unit-like, optional
Unit for the dataset. Strings that can be converted to a
`~astropy.units.Unit` are allowed.
Default is ``None``.
copy : `bool`, optional
Indicates whether to save the arguments as copy. ``True`` copies
every attribute before saving it while ``False`` tries to save every
parameter as reference.
Note however that it is not always possible to save the input as
reference.
Default is ``False``.
.. versionadded:: 1.2
psf : `numpy.ndarray` or None, optional
Image representation of the PSF. In order for convolution to be flux-
preserving, this should generally be normalized to sum to unity.
Raises
------
TypeError
In case ``data`` or ``meta`` don't meet the restrictions.
Notes
-----
Each attribute can be accessed through the homonymous instance attribute:
``data`` in a `NDData` object can be accessed through the `data`
attribute::
>>> from astropy.nddata import NDData
>>> nd = NDData([1,2,3])
>>> nd.data
array([1, 2, 3])
Given a conflicting implicit and an explicit parameter during
initialization, for example the ``data`` is a `~astropy.units.Quantity` and
the unit parameter is not ``None``, then the implicit parameter is replaced
(without conversion) by the explicit one and a warning is issued::
>>> import numpy as np
>>> import astropy.units as u
>>> q = np.array([1,2,3,4]) * u.m
>>> nd2 = NDData(q, unit=u.cm)
INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
>>> nd2.data # doctest: +FLOAT_CMP
array([1., 2., 3., 4.])
>>> nd2.unit
Unit("cm")
See Also
--------
NDDataRef
NDDataArray
"""
# Instead of a custom property use the MetaData descriptor also used for
# Tables. It will check if the meta is dict-like or raise an exception.
meta = MetaData(doc=_meta_doc, copy=False)
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
copy=False,
psf=None,
):
# Rather pointless since NDDataBase does not implement any setting, but
# an earlier version of NDDataBase did call the uncertainty setter.
# If anyone wants to alter this behavior again, the call to the
# superclass NDDataBase should be in here.
super().__init__()
# Check if data is any type from which to collect some implicitly
# passed parameters.
if isinstance(data, NDData): # don't use self.__class__ (issue #4137)
# Of course we need to check the data because subclasses with other
# init-logic might be passed in here. We could skip these
# tests if we compared for self.__class__ but that has other
# drawbacks.
# Comparing if there is an explicit and an implicit unit parameter.
# If that is the case use the explicit one and issue a warning
# that there might be a conflict. In case there is no explicit
# unit just overwrite the unit parameter with the NDData.unit
# and proceed as if that one was given as parameter. Same for the
# other parameters.
if unit is not None and data.unit is not None and unit != data.unit:
log.info("overwriting NDData's current unit with specified unit.")
elif data.unit is not None:
unit = data.unit
if uncertainty is not None and data.uncertainty is not None:
log.info(
"overwriting NDData's current "
"uncertainty with specified uncertainty."
)
elif data.uncertainty is not None:
uncertainty = data.uncertainty
if mask is not None and data.mask is not None:
log.info("overwriting NDData's current mask with specified mask.")
elif data.mask is not None:
mask = data.mask
if wcs is not None and data.wcs is not None:
log.info("overwriting NDData's current wcs with specified wcs.")
elif data.wcs is not None:
wcs = data.wcs
if psf is not None and data.psf is not None:
log.info("Overwriting NDData's current psf with specified psf.")
elif data.psf is not None:
psf = data.psf
if meta is not None and data.meta is not None:
log.info("overwriting NDData's current meta with specified meta.")
elif data.meta is not None:
meta = data.meta
data = data.data
else:
if hasattr(data, "mask") and hasattr(data, "data"):
# Separating data and mask
if mask is not None:
log.info(
"overwriting Masked Objects's current mask with specified mask."
)
else:
mask = data.mask
# Just save the data for further processing, we could be given
# a masked Quantity or something else entirely. Better to check
# it first.
data = data.data
if isinstance(data, Quantity):
if unit is not None and unit != data.unit:
log.info("overwriting Quantity's current unit with specified unit.")
else:
unit = data.unit
data = data.value
# Quick check on the parameters if they match the requirements.
if (
not hasattr(data, "shape")
or not hasattr(data, "__getitem__")
or not hasattr(data, "__array__")
):
# Data doesn't look like a numpy array, try converting it to
# one.
data = np.array(data, subok=True, copy=False)
# Another quick check to see if what we got looks like an array
# rather than an object (since numpy will convert a
# non-numerical/non-string inputs to an array of objects).
if data.dtype == "O":
raise TypeError("could not convert data to numpy array.")
if unit is not None:
unit = Unit(unit)
if copy:
# Data might have been copied before but no way of validating
# without another variable.
data = deepcopy(data)
mask = deepcopy(mask)
wcs = deepcopy(wcs)
psf = deepcopy(psf)
meta = deepcopy(meta)
uncertainty = deepcopy(uncertainty)
# Actually - copying the unit is unnecessary but better safe
# than sorry :-)
unit = deepcopy(unit)
# Store the attributes
self._data = data
self.mask = mask
self._wcs = None
if wcs is not None:
# Validate the wcs
self.wcs = wcs
self.meta = meta # TODO: Make this call the setter sometime
self._unit = unit
# Call the setter for uncertainty to further check the uncertainty
self.uncertainty = uncertainty
self.psf = psf
def __str__(self):
data = str(self.data)
unit = f" {self.unit}" if self.unit is not None else ""
return data + unit
def __repr__(self):
prefix = self.__class__.__name__ + "("
data = np.array2string(self.data, separator=", ", prefix=prefix)
unit = f", unit='{self.unit}'" if self.unit is not None else ""
return "".join((prefix, data, unit, ")"))
@property
def data(self):
"""
`~numpy.ndarray`-like : The stored dataset.
"""
return self._data
@property
def mask(self):
"""
any type : Mask for the dataset, if any.
Masks should follow the ``numpy`` convention that valid data points are
marked by ``False`` and invalid ones with ``True``.
"""
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
@property
def unit(self):
"""
`~astropy.units.Unit` : Unit for the dataset, if any.
"""
return self._unit
@property
def wcs(self):
"""
any type : A world coordinate system (WCS) for the dataset, if any.
"""
return self._wcs
@wcs.setter
def wcs(self, wcs):
if self._wcs is not None and wcs is not None:
raise ValueError(
"You can only set the wcs attribute with a WCS if no WCS is present."
)
if wcs is None or isinstance(wcs, BaseHighLevelWCS):
self._wcs = wcs
elif isinstance(wcs, BaseLowLevelWCS):
self._wcs = HighLevelWCSWrapper(wcs)
else:
raise TypeError(
"The wcs argument must implement either the high or low level WCS API."
)
@property
def psf(self):
return self._psf
@psf.setter
def psf(self, value):
self._psf = value
@property
def uncertainty(self):
"""
any type : Uncertainty in the dataset, if any.
Should have an attribute ``uncertainty_type`` that defines what kind of
uncertainty is stored, such as ``'std'`` for standard deviation or
``'var'`` for variance. A metaclass defining such an interface is
`~astropy.nddata.NDUncertainty` but isn't mandatory.
"""
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
# There is one requirement on the uncertainty: that
# it has an attribute 'uncertainty_type'.
# If it does not match this requirement convert it to an unknown
# uncertainty.
if not hasattr(value, "uncertainty_type"):
log.info("uncertainty should have attribute uncertainty_type.")
value = UnknownUncertainty(value, copy=False)
# If it is a subclass of NDUncertainty we must set the
# parent_nddata attribute. (#4152)
if isinstance(value, NDUncertainty):
# In case the uncertainty already has a parent create a new
# instance because we need to assume that we don't want to
# steal the uncertainty from another NDData object
if value._parent_nddata is not None:
value = value.__class__(value, copy=False)
# Then link it to this NDData instance (internally this needs
# to be saved as weakref but that's done by NDUncertainty
# setter).
value.parent_nddata = self
self._uncertainty = value
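# Editor's note: illustrative sketch (not part of astropy) of the implicit
# parameter handling above: a masked array is split into ``data`` and ``mask``.
#
#     >>> import numpy as np
#     >>> from astropy.nddata import NDData
#     >>> marr = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
#     >>> ndd = NDData(marr)
#     >>> ndd.data
#     array([1., 2., 3.])
#     >>> ndd.mask
#     array([False,  True, False])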
|
3681a05b204be8fd0e3e0ff91a1e9823fd77d4c9e905d94e5c8f96ad05d498ed | """
A module that provides functions for manipulating bit masks and data quality
(DQ) arrays.
"""
import numbers
import warnings
from collections import OrderedDict
import numpy as np
__all__ = [
"bitfield_to_boolean_mask",
"interpret_bit_flags",
"BitFlagNameMap",
"extend_bit_flag_map",
"InvalidBitFlag",
]
_ENABLE_BITFLAG_CACHING = True
_MAX_UINT_TYPE = np.maximum_sctype(np.uint)
_SUPPORTED_FLAGS = int(np.bitwise_not(0, dtype=_MAX_UINT_TYPE, casting="unsafe"))
def _is_bit_flag(n):
"""
Verifies if the input number is a bit flag (i.e., an integer number that is
an integer power of 2).
Parameters
----------
n : int
A positive integer number. Non-positive integers are considered not to
be "flags".
Returns
-------
bool
``True`` if input ``n`` is a bit flag and ``False`` if it is not.
"""
if n < 1:
return False
return bin(n).count("1") == 1
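# Editor's note: quick illustrative checks (not part of astropy) of the helper
# above: only positive integers that are powers of two count as bit flags.
#
#     >>> _is_bit_flag(8)
#     True
#     >>> _is_bit_flag(12)
#     False
#     >>> _is_bit_flag(0)
#     False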
def _is_int(n):
return (isinstance(n, numbers.Integral) and not isinstance(n, bool)) or (
isinstance(n, np.generic) and np.issubdtype(n, np.integer)
)
class InvalidBitFlag(ValueError):
"""Indicates that a value is not an integer that is a power of 2."""
pass
class BitFlag(int):
"""Bit flags: integer values that are powers of 2."""
def __new__(cls, val, doc=None):
if isinstance(val, tuple):
if doc is not None:
raise ValueError("Flag's doc string cannot be provided twice.")
val, doc = val
if not (_is_int(val) and _is_bit_flag(val)):
raise InvalidBitFlag(
"Value '{}' is not a valid bit flag: bit flag value must be "
"an integral power of two.".format(val)
)
s = int.__new__(cls, val)
if doc is not None:
s.__doc__ = doc
return s
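# Editor's note: illustrative sketch (not part of astropy) of the ``BitFlag``
# constructor above; a ``(value, doc)`` tuple attaches a docstring, while a
# value that is not a power of two raises ``InvalidBitFlag``.
#
#     >>> flag = BitFlag((8, "Dome closed"))
#     >>> int(flag), flag.__doc__
#     (8, 'Dome closed')
#     >>> # BitFlag(12) would raise InvalidBitFlag (12 is not a power of two).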
class BitFlagNameMeta(type):
def __new__(mcls, name, bases, members):
for k, v in members.items():
if not k.startswith("_"):
v = BitFlag(v)
attr = [k for k in members.keys() if not k.startswith("_")]
attrl = list(map(str.lower, attr))
if _ENABLE_BITFLAG_CACHING:
cache = OrderedDict()
for b in bases:
for k, v in b.__dict__.items():
if k.startswith("_"):
continue
kl = k.lower()
if kl in attrl:
idx = attrl.index(kl)
raise AttributeError(
f"Bit flag '{attr[idx]:s}' was already defined."
)
if _ENABLE_BITFLAG_CACHING:
cache[kl] = v
members = {
k: v if k.startswith("_") else BitFlag(v) for k, v in members.items()
}
if _ENABLE_BITFLAG_CACHING:
cache.update(
{k.lower(): v for k, v in members.items() if not k.startswith("_")}
)
members = {"_locked": True, "__version__": "", **members, "_cache": cache}
else:
members = {"_locked": True, "__version__": "", **members}
return super().__new__(mcls, name, bases, members)
def __setattr__(cls, name, val):
if name == "_locked":
return super().__setattr__(name, True)
else:
if name == "__version__":
if cls._locked:
raise AttributeError("Version cannot be modified.")
return super().__setattr__(name, val)
err_msg = f"Bit flags are read-only. Unable to reassign attribute {name}"
if cls._locked:
raise AttributeError(err_msg)
namel = name.lower()
if _ENABLE_BITFLAG_CACHING:
if not namel.startswith("_") and namel in cls._cache:
raise AttributeError(err_msg)
else:
for b in cls.__bases__:
if not namel.startswith("_") and namel in list(
map(str.lower, b.__dict__)
):
raise AttributeError(err_msg)
if namel in list(map(str.lower, cls.__dict__)):
raise AttributeError(err_msg)
val = BitFlag(val)
if _ENABLE_BITFLAG_CACHING and not namel.startswith("_"):
cls._cache[namel] = val
return super().__setattr__(name, val)
def __getattr__(cls, name):
if _ENABLE_BITFLAG_CACHING:
flagnames = cls._cache
else:
flagnames = {k.lower(): v for k, v in cls.__dict__.items()}
flagnames.update(
{k.lower(): v for b in cls.__bases__ for k, v in b.__dict__.items()}
)
try:
return flagnames[name.lower()]
except KeyError:
raise AttributeError(f"Flag '{name}' not defined")
def __getitem__(cls, key):
return cls.__getattr__(key)
def __add__(cls, items):
if not isinstance(items, dict):
if not isinstance(items[0], (tuple, list)):
items = [items]
items = dict(items)
return extend_bit_flag_map(
cls.__name__ + "_" + "_".join([k for k in items]), cls, **items
)
def __iadd__(cls, other):
raise NotImplementedError(
"Unary '+' is not supported. Use binary operator instead."
)
def __delattr__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __delitem__(cls, name):
raise AttributeError(
f"{cls.__name__}: cannot delete {cls.mro()[-2].__name__} member."
)
def __repr__(cls):
return f"<{cls.mro()[-2].__name__} '{cls.__name__}'>"
class BitFlagNameMap(metaclass=BitFlagNameMeta):
"""
A base class for bit flag name maps used to describe data quality (DQ)
flags of images by providing a mapping from a mnemonic flag name to a flag
value.
Mapping for a specific instrument should subclass this class.
Subclasses should define flags as class attributes with integer values
that are powers of 2. Each bit flag may also contain a string
comment following the flag value.
Examples
--------
>>> from astropy.nddata.bitmask import BitFlagNameMap
>>> class ST_DQ(BitFlagNameMap):
... __version__ = '1.0.0' # optional
... CR = 1, 'Cosmic Ray'
... CLOUDY = 4 # no docstring comment
... RAINY = 8, 'Dome closed'
...
>>> class ST_CAM1_DQ(ST_DQ):
... HOT = 16
... DEAD = 32
"""
pass
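# Editor's note: illustrative sketch (not part of astropy) of the ``+``
# operator provided by ``BitFlagNameMeta`` above, which extends a map with a
# dictionary of new flags; the flag names used here are made up and
# ``extend_bit_flag_map`` is defined just below.
#
#     >>> ST_DQ = extend_bit_flag_map("ST_DQ", CR=1, CLOUDY=4, RAINY=8)
#     >>> ST_CAM2_DQ = ST_DQ + {"HOT": 16, "DEAD": 32}
#     >>> ST_CAM2_DQ.HOT, ST_CAM2_DQ.CR
#     (16, 1)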
def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When the input is a list of flags (either a Python list of integer flags or
a comma-, ``'|'``-, or ``'+'``-separated string of flags),
the returned bit mask is obtained by summing the input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``bit_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all([_is_int(flag) for flag in bit_flags]):
if flag_name_map is not None and all(
[isinstance(flag, str) for flag in bit_flags]
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
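# Editor's note: an extra illustrative check (not part of astropy) of the
# single-integer shorthand handled above: a plain integer string is treated
# as a ready-made bit mask, and a leading '~' flips its bits.
#
#     >>> hex(0xFFFF & interpret_bit_flags("~12"))
#     '0xfff3'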
def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
bitfield_to_boolean_mask(bitfield, ignore_flags=None, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
An integer bit mask, `None`, a Python list of bit flags, or a comma-,
``'|'``-, or ``'+'``-separated string list of integer
bit flags or mnemonic flag names that indicate what bits in the input
``bitfield`` should be *ignored* (i.e., zeroed), or `None`.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
              Setting ``ignore_flags`` to 0 effectively means that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
        | Alternatively, ``ignore_flags`` may be a comma-, ``'+'``-, or
          ``'|'``-separated string list of integer bit flags that should
          be added (bitwise OR) together to create an integer bit mask.
          For example, ``'4,8'``, ``'4|8'``, and ``'4+8'`` are all equivalent
          and indicate that bit flags 4 and 8 in the input ``bitfield``
          array should be ignored when generating the boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
        or 1 (if ``dtype`` is of numerical type) and values corresponding
        to "bad" flags will be ``numpy.False_`` (or 0). When
        ``good_mask_value`` is zero or ``numpy.False_`` then the values
        in the output boolean mask array corresponding to "good" bit fields
        in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
        ``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
        corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
        bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``ignore_flags`` is a comma- or
        '+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
        ``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
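# A minimal usage sketch of this pipeline (``'image.fits'`` is an
# illustrative filename only; any FITS file with celestial WCS keywords
# would do):
#
#     from astropy.io import fits
#     from astropy.wcs import WCS
#
#     with fits.open('image.fits') as hdulist:
#         w = WCS(hdulist[0].header, fobj=hdulist)
#         sky = w.all_pix2world([[100.0, 100.0]], 0)  # pixel -> world (deg)
#         pix = w.all_world2pix(sky, 0)               # world -> pixel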
# STDLIB
import builtins
import copy
import io
import itertools
import os
import re
import textwrap
import uuid
import warnings
# THIRD-PARTY
import numpy as np
from packaging.version import Version
# LOCAL
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from . import _wcs, docstrings
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = [
"FITSFixedWarning",
"WCS",
"find_all_wcs",
"DistortionLookupTable",
"Sip",
"Tabprm",
"Wcsprm",
"Auxprm",
"Celprm",
"Prjprm",
"Wtbarr",
"WCSBase",
"validate",
"WcsError",
"SingularMatrixError",
"InconsistentAxisTypesError",
"InvalidTransformError",
"InvalidCoordinateError",
"InvalidPrjParametersError",
"NoSolutionError",
"InvalidSubimageSpecificationError",
"NoConvergence",
"NonseparableSubimageCoordinateSystemError",
"NoWcsKeywordsFoundError",
"InvalidTabularParametersError",
]
__doctest_skip__ = ["WCS.all_world2pix"]
if _wcs is not None:
if Version(_wcs.__version__) < Version("5.8"):
raise ImportError(
"astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
"later on the 5.x series are known to work. The version of wcslib "
"that ships with astropy may be used."
)
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build on your platform."
)
_WCSSUB_TIME_SUPPORT = Version(_wcs.__version__) >= Version("7.8")
_WCS_TPD_WARN_LT71 = Version(_wcs.__version__) < Version("7.1")
_WCS_TPD_WARN_LT74 = Version(_wcs.__version__) < Version("7.4")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Celprm = _wcs.Celprm
Prjprm = _wcs.Prjprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = (
_wcs.NonseparableSubimageCoordinateSystemError
)
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
InvalidPrjParametersError = _wcs.InvalidPrjParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(("WCSSUB_", "WCSHDR_", "WCSHDO_", "WCSCOMPARE_", "PRJ_")):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == "c" and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
_WCSSUB_TIME_SUPPORT = False
_WCS_TPD_WARN_LT71 = False
_WCS_TPD_WARN_LT74 = False
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches keywords that start
# with A or B, optionally followed by P, followed by an underscore and a
# number in the range 0-19, followed by another underscore and a number in
# the range 0-19. The keyword may optionally end with a capital letter.
SIP_KW = re.compile("""^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$""")
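# For example (illustrative), 'A_0_2', 'B_1_1', 'AP_13_0', and 'A_1_2B'
# all match SIP_KW, while 'A_ORDER' and 'PV1_1' do not.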
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == "image":
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == "binary":
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == "pixel":
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' and/or 'pixel'"
)
else:
keysel_flags = -1
return keysel_flags
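# For example (illustrative), _parse_keysel(['image', 'pixel']) returns
# WCSHDR_IMGHEAD | WCSHDR_PIXLIST, while _parse_keysel(None) returns -1,
# which the callers treat as "use the default keyword selection".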
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
    iterative solution used by
    :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(
self,
*args,
best_solution=None,
accuracy=None,
niter=None,
divergent=None,
slow_conv=None,
**kwargs,
):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn(
f"Function received unexpected arguments ({list(kwargs)}) these "
"are ignored but will raise an Exception in the "
"future.",
AstropyDeprecationWarning,
)
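# A minimal sketch of how this exception is typically caught (``w`` and
# ``sky`` are assumed to already exist; illustrative only):
#
#     try:
#         pix = w.all_world2pix(sky, 0)
#     except NoConvergence as e:
#         pix = e.best_solution   # fall back to the best iterate found
#         bad = e.divergent       # indices of divergent points, or None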
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
"""
def __init__(
self,
header=None,
fobj=None,
key=" ",
minerr=0.0,
relax=True,
naxis=None,
keysel=None,
colsel=None,
fix=True,
translate_units="",
_do_set=True,
):
close_fds = []
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
"keysel": copy.copy(keysel),
"colsel": copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key, relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = os.path.exists(header)
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2"
)
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object"
)
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError(
"'fobj' must be either None or an astropy.io.fits.HDUList object."
)
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode("ascii")
tmp_wcsprm = _wcs.Wcsprm(
header=tmp_header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
warnings=False,
hdulist=fobj,
)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(header, fobj, dist="CPDIS", err=minerr)
self._fix_pre2012_scamp_tpv(header)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace("END" + " " * 77, "")
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
try:
wcsprm = _wcs.Wcsprm(
header=header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(
header=None,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if wcsprm.naxis != 2 and (
det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip
):
raise ValueError(
f"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {wcsprm.naxis} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
"""
)
header_naxis = header.get("NAXIS", None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
f"The WCS transformation has more axes ({wcsprm.naxis:d}) than the "
f"image it is associated with ({header_naxis:d})",
FITSFixedWarning,
)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(
new_copy,
self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2),
)
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(
new_copy,
deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo)),
)
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
        Convenience method so the user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
        Convenience method so the user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [
cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname
]
# Restore the original CNAMEs
copy.wcs.cname = ["" if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(
None if i is None else self.pixel_shape[i] for i in keep
)
if self.pixel_bounds:
copy.pixel_bounds = [
None if i is None else self.pixel_bounds[i] for i in keep
]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
SCAMP uses TAN projection exclusively. The case of CTYPE ending
in -TAN should have been handled by ``_fix_pre2012_scamp_tpv()`` before
calling this function.
"""
if self.wcs is None:
return
# Delete SIP if CTYPE explicitly has '-TPV' code:
ctype = [ct.strip().upper() for ct in self.wcs.ctype]
if sum(ct.endswith("-TPV") for ct in ctype) == 2:
if self.sip is not None:
self.sip = None
warnings.warn(
"Removed redundant SIP distortion parameters "
+ "because CTYPE explicitly specifies TPV distortions",
FITSFixedWarning,
)
return
# Nothing to be done if no PV parameters attached since SCAMP
# encodes distortion coefficients using PV keywords
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Loop over distinct values of `i' index
has_scamp = False
for i in {v[0] for v in pv}:
# Get all values of `j' index for this value of `i' index
js = tuple(v[1] for v in pv if v[0] == i)
if "-TAN" in self.wcs.ctype[i - 1].upper() and js and max(js) >= 5:
# TAN projection *may* use PVi_j with j up to 4 - see
# Sections 2.5, 2.6, and Table 13
# in https://doi.org/10.1051/0004-6361:20021327
has_scamp = True
break
if has_scamp and all(ct.endswith("-SIP") for ct in ctype):
# Prefer SIP - see recommendations in Section 7 in
# http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf
self.wcs.set_pv([])
warnings.warn(
"Removed redundant SCAMP distortion parameters "
+ "because SIP parameters are also present",
FITSFixedWarning,
)
return
def fix(self, translate_units="", naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
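        Examples
        --------
        A minimal sketch (``w`` is assumed to be an existing `WCS` object
        whose header uses the non-standard ``'S'``, ``'H'``, or ``'D'``
        unit codes):
        >>> w.fix(translate_units='shd')  # doctest: +SKIP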
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (
key == "datfix"
and "1858-11-17" in val
and not np.count_nonzero(self.wcs.mjdref)
):
continue
warnings.warn(
f"'{key}' made the change '{val}'.",
FITSFixedWarning,
)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
            Used to get ``NAXIS1`` and ``NAXIS2``.
            ``header`` and ``axes`` are mutually exclusive, alternative ways
            to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
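        Examples
        --------
        A minimal sketch (``w`` is an existing two-dimensional celestial
        `WCS`; the image shape ``(1024, 1024)`` is illustrative only):
        >>> corners = w.calc_footprint(axes=(1024, 1024))  # doctest: +SKIP
        >>> corners.shape  # doctest: +SKIP
        (4, 2)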
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n",
AstropyUserWarning,
)
return None
else:
naxis1 = header.get("NAXIS1", None)
naxis2 = header.get("NAXIS2", None)
if naxis1 is None or naxis2 is None:
raise ValueError("Image size could not be determined.")
if center:
corners = np.array(
[[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64
)
else:
corners = np.array(
[
[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5],
],
dtype=np.float64,
)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header["AXISCORR"]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = "D2IMDIS"
d_kw = "D2IM"
err_kw = "D2IMERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == "lookup":
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
"An astropy.io.fits.HDUList"
"is required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["D2IMARR", d_extver].data
else:
d_data = (fobj["D2IMARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["D2IMARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been"
" deprecated.`~astropy.wcs` will read in files with ``AXISCORR`` but"
" ``to_fits()`` will write out files without it.",
AstropyDeprecationWarning,
)
cpdis = [None, None]
crpix = [0.0, 0.0]
crval = [0.0, 0.0]
cdelt = [1.0, 1.0]
try:
d2im_data = fobj[("D2IMARR", 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[("D2IMARR", 1)].header
naxis = d2im_hdr["NAXIS"]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get("CRPIX" + str(i), 0.0)
crval[i - 1] = d2im_hdr.get("CRVAL" + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get("CDELT" + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = "D2IMDIS"
d_kw = "D2IM"
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Detector to image correction type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(det2im.data.shape),
"Number of independent variables in D2IM function",
)
for i in range(det2im.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a D2IM function",
)
image = fits.ImageHDU(det2im.data, name="D2IMARR")
header = image.header
header["CRPIX1"] = (det2im.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (det2im.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
det2im.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
det2im.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (det2im.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (det2im.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist="CPDIS", err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == "CPDIS":
d_kw = "DP"
err_kw = "CPERR"
else:
d_kw = "DQ"
err_kw = "CQERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == "lookup":
if not isinstance(fobj, fits.HDUList):
raise ValueError(
"an astropy.io.fits.HDUList is "
"required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["WCSDVARR", d_extver].data
else:
d_data = (fobj["WCSDVARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["WCSDVARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist="CPDIS"):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == "CPDIS":
d_kw = "DP"
else:
d_kw = "DQ"
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Prior distortion function type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(cpdis.data.shape),
f"Number of independent variables in {dist} function",
)
for i in range(cpdis.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a {dist} function",
)
image = fits.ImageHDU(cpdis.data, name="WCSDVARR")
header = image.header
header["CRPIX1"] = (cpdis.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (cpdis.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
cpdis.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
cpdis.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (cpdis.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (cpdis.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _fix_pre2012_scamp_tpv(self, header, wcskey=""):
"""
Replace -TAN with TPV (for pre-2012 SCAMP headers that use -TAN
in CTYPE). Ignore SIP if present. This follows recommendations in
Section 7 in
http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf.
This is to deal with pre-2012 headers that may contain TPV with a
        CTYPE that ends in '-TAN' (post-2012 headers should end in '-TPV',
        since SCAMP adopted the new TPV convention).
"""
if isinstance(header, (str, bytes)):
return
wcskey = wcskey.strip().upper()
cntype = [
(nax, header.get(f"CTYPE{nax}{wcskey}", "").strip())
for nax in range(1, self.naxis + 1)
]
tan_axes = [ct[0] for ct in cntype if ct[1].endswith("-TAN")]
if len(tan_axes) == 2:
# check if PVi_j with j >= 5 is present and if so, do not load SIP
tan_to_tpv = False
for nax in tan_axes:
js = []
for p in header[f"PV{nax}_*{wcskey}"].keys():
prefix = f"PV{nax}_"
if p.startswith(prefix):
p = p[len(prefix) :]
p = p.rstrip(wcskey)
try:
p = int(p)
except ValueError:
continue
js.append(p)
if js and max(js) >= 5:
tan_to_tpv = True
break
if tan_to_tpv:
warnings.warn(
"Removed redundant SIP distortion parameters "
+ "because SCAMP' PV distortions are also present",
FITSFixedWarning,
)
self._remove_sip_kw(header, del_order=True)
for i in tan_axes:
kwd = f"CTYPE{i:d}{wcskey}"
if kwd in header:
header[kwd] = (
header[kwd].strip().upper().replace("-TAN", "-TPV")
)
@staticmethod
def _remove_sip_kw(header, del_order=False):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {
m.group() for m in map(SIP_KW.match, list(header)) if m is not None
}:
del header[key]
if del_order:
for kwd in ["A_ORDER", "B_ORDER", "AP_ORDER", "BP_ORDER"]:
if kwd in header:
del header[kwd]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header["A_ORDER"] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion"
)
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header["A_ORDER"]
del header["B_ORDER"]
ctype = [header[f"CTYPE{nax}{wcskey}"] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith("-SIP") for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif "B_ORDER" in header and header["B_ORDER"] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion"
)
else:
a = None
b = None
if "AP_ORDER" in header and header["AP_ORDER"] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion"
)
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header["AP_ORDER"]
del header["BP_ORDER"]
elif "BP_ORDER" in header and header["BP_ORDER"] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion"
)
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError("Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = "sky to detector" if name[-1] == "P" else "detector to sky"
comment = (
f'SIP polynomial order, axis {ord(name[0]) - ord("A"):d}, {trdir:s}'
)
keywords[f"{name}_ORDER"] = size - 1, comment
comment = "SIP distortion coefficient"
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[f"{name}_{i:d}_{j:d}"] = a[i, j], comment
write_array("A", self.sip.a)
write_array("B", self.sip.b)
write_array("AP", self.sip.ap)
write_array("BP", self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be used as input"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be "
"used as input"
)
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other"
)
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == "output":
output = self._normalize_sky(output)
return (
output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape),
)
return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
f"of shape (N, {self.naxis})"
)
if 0 in xy.shape:
return xy
if ra_dec_order and sky == "input":
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == "output":
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
f"(coords[N][{self.naxis}], origin)"
)
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be "
+ "a 1-D array for each axis, followed by an origin."
)
return _return_list_of_arrays(axes, origin)
raise TypeError(
f"WCS projection has {self.naxis} dimensions, so expected 2 (an Nx{self.naxis} array "
f"and the origin argument) or {self.naxis + 1} arguments (the position in each "
f"dimension, and the origin argument). Instead, {len(args)} arguments were "
"given."
)
def all_pix2world(self, *args, **kwargs):
return self._array_converter(self._all_pix2world, "output", *args, **kwargs)
all_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('sky coordinates, in degrees', 8)}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)["world"], "output", *args, **kwargs
)
wcs_pix2world.__doc__ = f"""
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('world coordinates, in degrees', 8)}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
"""
def _all_world2pix(
self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet
):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
        # (see equations 2 and 3 in SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
        # a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixels coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identity transformation
# (`x_i=pix2foc(x_i)`) and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right away when non-linear
# distortions are not present, which is detected by checking
# that the attributes `sip`, `cpdis1`, `cpdis2`, `det2im1`,
# and `det2im2` are *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking whether an iterative solution is necessary at all;
# 2) checking for divergence; 3) re-implementing the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for the cases when some points diverge and we
# want to stop iterating over them). In my tests, the adaptive
# version of the algorithm is about 50% slower than the
# non-adaptive version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` is an `N x M` array with
# one point per row, where `N` is the number of points to be
# converted simultaneously to image coordinates and `M` is the
# number of coordinate axes in the WCS.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in future releases of `~astropy.wcs`, `pix2foc`
# no longer applies all the required distortion corrections,
# then the calls to `pix2foc` in the code below will have to
# be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()["invalid"]
old_over = np.geterr()["over"]
np.seterr(invalid="ignore", over="ignore")
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while np.nanmax(dn) >= tol2 and k < maxiter:
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix * dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = dn >= dnprev
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = dn >= tol2
(inddiv,) = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = dn < dnprev
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
(ind,) = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
(ind,) = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while ind.shape[0] > 0 and k < maxiter:
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = dnnew < dnprev[ind]
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
(subind,) = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
(subind,) = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = (~np.all(np.isfinite(pix), axis=1)) & (
np.all(np.isfinite(world), axis=1)
)
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
(inddiv,) = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
(ind,) = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
f"converge to the requested accuracy after {k:d} "
"iterations.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=None,
)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
f"After {k:d} iterations, the solution is diverging "
"at least for one input point.",
best_solution=pix,
accuracy=np.abs(dpix),
niter=k,
slow_conv=ind,
divergent=inddiv,
)
return pix
@deprecated_renamed_argument("accuracy", "tolerance", "4.3")
def all_world2pix(
self,
*args,
tolerance=1e-4,
maxiter=20,
adaptive=False,
detect_divergence=True,
quiet=False,
**kwargs,
):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs: self._all_world2pix(
*args,
tolerance=tolerance,
maxiter=maxiter,
adaptive=adaptive,
detect_divergence=detect_divergence,
quiet=quiet,
),
"input",
*args,
**kwargs,
)
all_world2pix.__doc__ = f"""
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with the complete
distortion model.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within the maximum number of
iterations set by the ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see the ``Notes`` section below) in which it
keeps iterating over *all* input points until
the required accuracy has been reached for *all* input
points. In some cases it may happen that
*almost all* points have reached the required accuracy
but there remain only a few input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
the specifics of the image, geometric distortions, and
the number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger of
getting this setting wrong is a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the returned results (in addition to a
performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about a 5-20%
performance penalty, with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments shows good stability
of the numerical method over a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations, we iterate
starting from the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
is therefore highly efficient (>30x speed-up compared with
converting the points one at a time) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass to :py:meth:`all_world2pix` a single
long array of all the points that need to be converted instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
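As an illustrative sketch (the names ``w``, ``ra_arr``, and ``dec_arr``
are placeholders, not part of this method's API)::
    # fast: one vectorized call for all points
    x, y = w.all_world2pix(ra_arr, dec_arr, 1)
    # slow: one call per point
    # for r, d in zip(ra_arr, dec_arr):
    #     xi, yi = w.all_world2pix(r, d, 1)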
Raises
------
NoConvergence
The method did not converge to a
solution with the required accuracy within the maximum
number of iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
"""
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)["pixcrd"], "input", *args, **kwargs
)
wcs_world2pix.__doc__ = f"""
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('naxis', 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = f"""
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('focal coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = f"""
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS('2', 8)}
Returns
-------
{docstrings.RETURNS('pixel coordinates', 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def proj_plane_pixel_scales(self):
"""
Calculate pixel scales along each axis of the image pixel at
the ``CRPIX`` location once it is projected onto the
"plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this method ignores distortions arising from the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
scale : list of `~astropy.units.Quantity`
A vector of projection plane increments corresponding to each
pixel side (axis).
See Also
--------
astropy.wcs.utils.proj_plane_pixel_scales
"""
from astropy.wcs.utils import proj_plane_pixel_scales # Avoid circular import
values = proj_plane_pixel_scales(self)
units = [u.Unit(x) for x in self.wcs.cunit]
return [
value * unit for (value, unit) in zip(values, units)
] # Can have different units
def proj_plane_pixel_area(self):
"""
For a **celestial** WCS (see `astropy.wcs.WCS.celestial`), returns pixel
area of the image pixel at the ``CRPIX`` location once it is projected
onto the "plane of intermediate world coordinates" as defined in
`Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.
.. note::
This method is concerned **only** with the transformation
"image plane"->"projection plane" and **not** with the
transformation "celestial sphere"->"projection plane"->"image plane".
Therefore, this method ignores distortions arising from the
non-linear nature of most projections.
.. note::
This method only returns sensible answers if the WCS contains
celestial axes, i.e., the `~astropy.wcs.WCS.celestial` WCS object.
Returns
-------
area : `~astropy.units.Quantity`
Area (in the projection plane) of the pixel at ``CRPIX`` location.
Raises
------
ValueError
Pixel area is defined only for 2D pixels. Most likely the
`~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
WCS is not a square matrix of second order.
Notes
-----
Depending on the application, the square root of the pixel area can be used to
represent a single pixel scale of an equivalent square pixel
whose area is equal to the area of a generally non-square pixel.
See Also
--------
astropy.wcs.utils.proj_plane_pixel_area
"""
from astropy.wcs.utils import proj_plane_pixel_area # Avoid circular import
value = proj_plane_pixel_area(self)
unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1]) # 2D only
return value * unit
def to_fits(self, relax=False, key=None):
"""
Generate an `~astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`astropy:relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14 # Defined by C-ext
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe # Defined by C-ext
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
# Check if we can handle TPD distortion correctly
if _WCS_TPD_WARN_LT71:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}. WCSLIB"
f" {_wcs.__version__} is writing this in a format"
" incompatible with current versions - please update to"
" 7.4 or use the bundled WCSLIB.",
AstropyWarning,
)
elif _WCS_TPD_WARN_LT74:
for kw, val in header.items():
if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
warnings.warn(
f"WCS contains a TPD distortion model in {kw}, which"
" requires WCSLIB 7.4 or later to store in a FITS header"
f" (having {_wcs.__version__}).",
AstropyWarning,
)
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(
not ctyp.endswith("-SIP") for ctyp in self.wcs.ctype
):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if (
not do_sip
and self.wcs is not None
and any(self.wcs.ctype)
and self.sip is not None
):
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded:"
f" {', '.join(missing_keys)} Use the ``relax`` kwarg to control"
" this.",
AstropyWarning,
)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out, add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if the current WCS is already distortion-corrected (e.g., drizzled),
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis + 1):
# strip() must be called here to cover the case of alt key= " "
kw = f"CTYPE{i}{self.wcs.alt}".strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(
self, filename="footprint.reg", color="green", width=2, coordsys=None
):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = (
"# Region file format: DS9 version 4.0 \n"
'# global color=green font="helvetica 12 bold '
"select=1 highlite=1 edit=1 move=1 delete=1 "
"include=1 fixed=0 source\n"
)
coordsys = coordsys or self.wcs.radesys
if coordsys not in (
"PHYSICAL",
"IMAGE",
"FK4",
"B1950",
"FK5",
"J2000",
"GALACTIC",
"ECLIPTIC",
"ICRS",
"LINEAR",
"AMPLIFIER",
"DETECTOR",
):
raise ValueError(
f"Coordinate system '{coordsys}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
)
with open(filename, mode="w") as f:
f.write(comments)
f.write(f"{coordsys}\n")
f.write("polygon(")
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=",")
f.write(f") # color={color}, width={width:d} \n")
def _get_naxis(self, header=None):
_naxis = []
if header is not None and not isinstance(header, (str, bytes)):
for naxis in itertools.count(1):
try:
_naxis.append(header[f"NAXIS{naxis}"])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
"""
Return a short description. Simply porting the behavior from
the `printwcs()` method.
"""
description = ["WCS Keywords\n", f"Number of WCS axes: {self.naxis!r}"]
sfmt = " : " + "".join(["{" + f"{i}" + "!r} " for i in range(self.naxis)])
keywords = ["CTYPE", "CRVAL", "CRPIX"]
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword + sfmt.format(*value))
if hasattr(self.wcs, "pc"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["PC", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = "CDELT" + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, "cd"):
for i in range(self.naxis):
s = ""
for j in range(self.naxis):
s += "".join(["CD", str(i + 1), "_", str(j + 1), " "])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
return "\n".join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dict
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError("This WCS object does not have a wcsprm object.")
coordinate_type_map = {0: None, 1: "stokes", 2: "celestial", 3: "spectral"}
scale_map = {
0: "linear",
1: "quantized",
2: "non-linear celestial",
3: "non-linear spectral",
4: "logarithmic",
5: "tabular",
}
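# The wcslib ``axis_types`` integer packs these fields into decimal
# digits: thousands -> coordinate type, hundreds -> scale,
# tens -> group, units -> number (see the decoding below). For
# example (illustrative only), a value of 2200 decodes to a
# celestial, non-linear-celestial longitude axis in group 0.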
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult["coordinate_type"] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult["scale"] = scale_map[scale]
group = (axis_type // 10) % 10
subresult["group"] = group
number = axis_type % 10
subresult["number"] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
dct = self.__dict__.copy()
dct["_alt_wcskey"] = self.wcs.alt
return (
__WCS_unpickle__,
(
self.__class__,
dct,
buffer.getvalue(),
),
)
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i + 1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
`~astropy.wcs.WCS`
A new `~astropy.wcs.WCS` instance with the same number of axes,
but two swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i + 1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub(
[WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]
) # Defined by C-ext
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS instance
"""
if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, "__len__"): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = (
(crpix - iview.start - 1.0) / iview.step
+ 0.5
+ 1.0 / iview.step / 2.0
)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if "indices must be integers" not in str(exc):
raise
warnings.warn(
f"NAXIS{wcs_index} attribute is not updated because at "
f"least one index ('{iview}') is no integer.",
AstropyUserWarning,
)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(
self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis.
Returns
-------
list of str
A list of names along each axis.
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split("-")[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included.
"""
return self.sub([WCSSUB_CELESTIAL]) # Defined by C-ext
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included.
"""
return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def temporal(self):
"""
A copy of the current WCS with only the time axes included.
"""
if not _WCSSUB_TIME_SUPPORT:
raise NotImplementedError(
"Support for 'temporal' axis requires WCSLIB version 7.8 or "
f"greater but linked WCSLIB version is {_wcs.__version__}"
)
return self.sub([WCSSUB_TIME]) # Defined by C-ext
@property
def is_temporal(self):
return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
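# The thousands digit of ``axis_types`` encodes the coordinate type
# (cf. ``get_axis_types``); a value of 4 marks a time axis.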
return any(t // 1000 == 4 for t in self.wcs.axis_types)
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (
self.sip is not None
or self.cpdis1 is not None
or self.cpdis2 is not None
or self.det2im1 is not None
or self.det2im2 is not None
)
@property
def pixel_scale_matrix(self):
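# Descriptive note: this returns the effective linear transformation
# matrix diag(CDELT) @ PC (or, when a CD matrix is used, an equivalent
# product), with fallbacks for WCSes where ``get_cdelt``/``get_pc``
# cannot be used.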
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"cdelt will be ignored since cd is present",
RuntimeWarning,
)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.dot(cdelt, pc)
return pccd
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
naxis = dct.pop("naxis", None)
if naxis:
hdulist[0].header["naxis"] = naxis
naxes = dct.pop("_naxis", [])
for k, na in enumerate(naxes):
hdulist[0].header[f"naxis{k + 1:d}"] = na
kwargs = dct.pop("_init_kwargs", {})
self.__dict__.update(dct)
wcskey = dct.pop("_alt_wcskey", " ")
WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
self.pixel_bounds = dct.get("_pixel_bounds", None)
return self
def find_all_wcs(
header, relax=True, keysel=None, fix=True, translate_units="", _do_set=True
):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or `~astropy.io.fits.Header` object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
keysel : sequence of str, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS`
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError("header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str or file-like or `~astropy.io.fits.HDUList`
The FITS file to validate.
Returns
-------
results : list subclass instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [f" WCS key '{self._key or ' '}':"]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = " - "
else:
initial_indent = " "
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=" ",
)
)
else:
result.append(" No issues.")
return "\n".join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f" ({self._hdu_name})"
else:
hdu_name = ""
result = [f"HDU {self._hdu_index}{hdu_name}:"]
for wcs in self:
result.append(repr(wcs))
return "\n".join(result)
return ""
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return "\n\n".join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject, fix=False, _do_set=False
)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", FITSFixedWarning, append=True)
try:
WCS(
hdu.header,
hdulist,
key=wcs.wcs.alt or " ",
relax=_wcs.WCSHDR_reject,
fix=True,
_do_set=False,
)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
|
2b51b92ba82aca71dbcf51813d07373f3d5c56ec824f80e0829d117ea809a793 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
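# For example, MULTI_HOPS[("tai", "tcb")] == ("tt", "tdb") indicates that
# a conversion between TAI and TCB is routed through the intermediate
# scales TT and TDB (tai -> tt -> tdb -> tcb).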
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
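# Illustrative example of the relation noted above: converting an interval
# from TT to TCG would use
#     dt_tcg = dt_tt + dt_tt * SCALE_OFFSETS[("tt", "tcg")]
# (the names ``dt_tt``/``dt_tcg`` are placeholders, not variables used below).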
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
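
    Examples
    --------
    Illustrative sketch (the compressed shape depends on the current numpy
    print options).

    >>> arr = np.arange(100).reshape(10, 10)
    >>> with np.printoptions(edgeitems=2):
    ...     out = _compress_array_dims(arr)
    >>> out.shape
    (5, 5)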
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
    # by a property, since Time can be serialized in different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
        jd2000 = 2451544.5  # Arbitrary JD value (2000-01-01 00:00) that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
        Return a new TimeDelta instance which is consistent with the input
        TimeDelta objects ``cols`` and has ``length`` rows.
        This is intended for creating an empty TimeDelta instance whose elements
        can be set in-place for table operations like join or vstack. It checks
        that the input attributes are consistent. This is used when a TimeDelta
        object is used as a mixin column in an astropy Table.
        Parameters
        ----------
        cols : list
            List of input columns (TimeDelta objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
        col : TimeDelta (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
        # If any inputs were masked then mask jd2 accordingly. From the above
        # routine ``mask`` must be either Python bool False or a bool ndarray
# with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format."""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale."""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
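        # Worked example (illustrative): converting from 'utc' to 'tcb' gives
        # xform_sort = ('tcb', 'utc') and multi = ('tdb', 'tt', 'tai'), so
        # xforms = ('tcb', 'tdb', 'tt', 'tai', 'utc'); because the requested
        # direction is the reverse of the sorted key, this is flipped to
        # ('utc', 'tai', 'tt', 'tdb', 'tcb') and traversed pairwise below.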
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
        format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
        For built-in date-like string formats, ``subfmt`` can be one of 'date_hms',
        'date_hm', or 'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
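
        Examples
        --------
        Illustrative sketch (dates chosen arbitrarily).

        >>> from astropy.time import Time
        >>> t = Time("2010-01-01T00:00:00")
        >>> t.to_value("jd")
        2455197.5
        >>> t.to_value("iso", subfmt="date")
        '2010-01-01'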
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
            # Some TimeFormat subclasses may not be able to handle being passed
            # an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
@property
def value(self):
"""Time value(s) in current format."""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
            from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
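
        Examples
        --------
        Illustrative sketch (dates chosen arbitrarily).

        >>> from astropy.time import Time
        >>> t = Time(["2020-01-01", "2020-01-03"])
        >>> t2 = t.insert(1, Time("2020-01-02"))
        >>> t2.shape
        (3,)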
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
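
        Examples
        --------
        Illustrative sketch (values chosen for demonstration).

        >>> import astropy.units as u
        >>> from astropy.time import Time
        >>> t1 = Time("2020-01-01T00:00:00")
        >>> t2 = t1 + 1 * u.us
        >>> bool(t1.isclose(t2, atol=1 * u.ms))
        True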
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
def copy(self, format=None):
"""
        Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
        indexed sequentially. When ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shape expanded
        at ``axis``. When ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
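
        Examples
        --------
        Illustrative sketch (dates chosen arbitrarily).

        >>> from astropy.time import Time
        >>> t = Time(["2010-01-01", "2010-01-03"])
        >>> t.mean().iso
        '2010-01-02 00:00:00.000'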
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
            raise ValueError(
                "Mean over zero elements is not supported as it would give an undefined"
                " time; see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
    def _time_comparison(self, other, op):
        """If other is of the same class as self, compare difference in self.scale.
Otherwise, return NotImplemented.
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
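
    Examples
    --------
    Illustrative sketch (the TT offset shown assumes the leap-second table
    valid for 2020).

    >>> from astropy.time import Time
    >>> t = Time("2020-01-01T00:00:00", scale="utc")
    >>> t.jd
    2458849.5
    >>> t.tt.isot
    '2020-01-01T00:01:09.184'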
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object."""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
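
        Examples
        --------
        Illustrative sketch (date chosen arbitrarily).

        >>> from astropy.time import Time
        >>> t = Time("2020-01-01T12:34:56")
        >>> t.strftime("%Y-%m-%d %H:%M:%S")
        '2020-01-01 12:34:56'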
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds. Should be added to the original time to get the
            time in the solar-system barycentre or the heliocentre. The time
            conversion to BJD will then include the relativistic correction
            as well.
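
        Examples
        --------
        Illustrative sketch; the observatory location and target below are
        hypothetical, and the correction itself needs solar-system ephemeris
        and IERS data, so those steps are not run as doctests.

        >>> import astropy.units as u
        >>> from astropy.coordinates import EarthLocation, SkyCoord
        >>> from astropy.time import Time
        >>> loc = EarthLocation(lat=52.2 * u.deg, lon=0.12 * u.deg)
        >>> t = Time("2020-06-01T00:00:00", location=loc)
        >>> target = SkyCoord(ra=120.0 * u.deg, dec=-5.0 * u.deg)
        >>> ltt_bary = t.light_travel_time(target)  # doctest: +SKIP
        >>> t_bary = t.tdb + ltt_bary  # doctest: +SKIP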
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
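# Illustrative usage sketch (the named site lookup needs network access; the
# site and target below are arbitrary examples):
#
#     from astropy.coordinates import EarthLocation, SkyCoord
#     from astropy.time import Time
#     site = EarthLocation.of_site('greenwich')
#     target = SkyCoord('08h22m31.7s', '-12d29m59s')
#     t = Time('2020-01-01T00:00:00', scale='utc', location=site)
#     ltt_bary = t.light_travel_time(target)        # barycentric by default
#     t_bary = t.tdb + ltt_bary                     # barycentric (TDB) time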
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion (except when ``longitude='tio'``).
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
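# Illustrative usage sketch (the UTC -> UT1 conversion requires IERS
# Earth-orientation data, which may be downloaded on first use):
#
#     t = Time('2020-01-01T12:00:00', scale='utc')
#     t.earth_rotation_angle('tio')                    # relative to the TIO
#     t.earth_rotation_angle(longitude=-155.5 * u.deg) # local value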
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
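# Illustrative usage sketch (apparent sidereal time also needs IERS data for
# the UTC -> UT1 conversion; the longitude below is an arbitrary example):
#
#     t = Time('2020-01-01T12:00:00', scale='utc')
#     t.sidereal_time('apparent', 'greenwich')         # Greenwich apparent ST
#     t.sidereal_time('mean', longitude=120 * u.deg)   # local mean ST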
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
"""
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status`` is `True`)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate them from the
IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_ut1_utc is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
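# Illustrative arithmetic sketch (dates below are arbitrary examples):
#
#     t1 = Time('2020-01-01', scale='utc')
#     t2 = Time('2020-07-01', scale='utc')
#     dt = t2 - t1                          # Time - Time -> TimeDelta
#     dt.sec                                # elapsed SI seconds
#     t1 + TimeDelta(1.0, format='jd')      # Time + TimeDelta -> Time
#     TimeDelta(30.0, format='sec') + t1    # reverse addition also works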
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
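# Illustrative sketch: the mean of a UTC array is computed via TAI and then
# converted back, so for the arbitrary dates below the result is the midpoint:
#
#     times = Time(['2020-01-01', '2020-01-03'], scale='utc')
#     times.mean().iso                      # '2020-01-02 00:00:00.000'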
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
For more information see:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times."""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
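# Illustrative scaling sketch:
#
#     dt = TimeDelta(1.0, format='jd')
#     (2 * dt).sec                          # 172800.0
#     (dt / 4).jd                           # 0.25
#     1 / dt                                # Quantity with unit 1 / d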
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See Also
--------
to_value : get the numerical value in a given unit.
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
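# Illustrative conversion sketch:
#
#     dt = TimeDelta(1.0, format='jd')
#     dt.to(u.s)                            # <Quantity 86400. s>
#     dt.to_value(u.hr)                     # 24.0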
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See Also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object."""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
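# Illustrative tolerance sketch (values are arbitrary examples):
#
#     dt1 = TimeDelta(1.0 * u.s)
#     dt2 = TimeDelta(1.0 * u.s + 5 * u.ns)
#     dt1.isclose(dt2, atol=10 * u.ns)      # True
#     dt1.isclose(dt2, atol=1 * u.ns)       # False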
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses those defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import datetime
import fnmatch
import re
import time
import warnings
from collections import OrderedDict, defaultdict
from decimal import Decimal
import erfa
import numpy as np
import astropy.units as u
from astropy.utils.decorators import classproperty, lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import _parse_times, conf, utils
from .utils import day_frac, quantity_day_frac, two_product, two_sum
__all__ = [
"TimeFormat",
"TimeJD",
"TimeMJD",
"TimeFromEpoch",
"TimeUnix",
"TimeUnixTai",
"TimeCxcSec",
"TimeGPS",
"TimeDecimalYear",
"TimePlotDate",
"TimeUnique",
"TimeDatetime",
"TimeString",
"TimeISO",
"TimeISOT",
"TimeFITS",
"TimeYearDayTime",
"TimeEpochDate",
"TimeBesselianEpoch",
"TimeJulianEpoch",
"TimeDeltaFormat",
"TimeDeltaSec",
"TimeDeltaJD",
"TimeEpochDateString",
"TimeBesselianEpochString",
"TimeJulianEpochString",
"TIME_FORMATS",
"TIME_DELTA_FORMATS",
"TimezoneInfo",
"TimeDeltaDatetime",
"TimeDatetime64",
"TimeYMDHMS",
"TimeNumeric",
"TimeDeltaNumeric",
]
__doctest_skip__ = ["TimePlotDate"]
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {
"TDT": "tt",
"ET": "tt",
"GMT": "utc",
"UT": "utc",
"IAT": "tai",
}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (
("%Y", r"(?P<year>\d\d\d\d)"),
("%m", r"(?P<mon>\d{1,2})"),
("%d", r"(?P<mday>\d{1,2})"),
("%H", r"(?P<hour>\d{1,2})"),
("%M", r"(?P<min>\d{1,2})"),
("%S", r"(?P<sec>\d{1,2})"),
):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if "%" not in subfmt_in:
subfmt_tuple = (
subfmt_tuple[0],
re.compile(subfmt_in + "$"),
subfmt_tuple[2],
)
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
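# Illustrative sketch of the transformation, using the 'date' sub-format of the
# ISO time format (defined later in this module) as an example:
#
#     ('date', '%Y-%m-%d', '{year:d}-{mon:02d}-{day:02d}')
#
# becomes, after substitution and compilation,
#
#     ('date',
#      re.compile(r'(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$'),
#      '{year:d}-{mon:02d}-{day:02d}')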
class TimeFormat:
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = "utc" # As of astropy 0.4
subfmts = ()
_registry = TIME_FORMATS
def __init__(
self, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __init_subclass__(cls, **kwargs):
# Register time formats that define a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if "name" in cls.__dict__ and cls.name != "astropy_time":
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if "value" in cls.__dict__ and not hasattr(cls.value, "fget"):
raise ValueError("If defined, 'value' must be a property")
cls._registry[cls.name] = cls
# If this class defines its own subfmts, preprocess the definitions.
if "subfmts" in cls.__dict__:
cls.subfmts = _regexify_subfmts(cls.subfmts)
return super().__init_subclass__(**kwargs)
@classmethod
def _get_allowed_subfmt(cls, subfmt):
"""Get an allowed subfmt for this class, either the input ``subfmt``
if this is valid or '*' as a default. This method gets used in situations
where the format of an existing Time object is changing and so the
out_ or in_subfmt may need to be coerced to the default '*' if that
``subfmt`` is no longer valid.
"""
try:
cls._select_subfmts(subfmt)
except ValueError:
subfmt = "*"
return subfmt
@property
def in_subfmt(self):
return self._in_subfmt
@in_subfmt.setter
def in_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._in_subfmt = subfmt
@property
def out_subfmt(self):
return self._out_subfmt
@out_subfmt.setter
def out_subfmt(self, subfmt):
# Validate subfmt value for this class, raises ValueError if not.
self._select_subfmts(subfmt)
self._out_subfmt = subfmt
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale."""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if "mask" not in self.cache:
self.cache["mask"] = np.isnan(self.jd2)
if self.cache["mask"].shape:
self.cache["mask"].flags.writeable = False
return self.cache["mask"]
@property
def masked(self):
if "masked" not in self.cache:
self.cache["masked"] = bool(np.any(self.mask))
return self.cache["masked"]
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, val):
# Verify precision is 0-9 (inclusive)
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError("precision attribute must be an int between 0 and 9")
self._precision = val
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# val1 cannot contain nan, but val2 can contain nan
isfinite1 = np.isfinite(val1)
if val1.size > 1: # Calling .all() on a scalar is surprisingly slow
isfinite1 = (
isfinite1.all()
) # Note: arr.all() about 3x faster than np.all(arr)
elif val1.size == 0:
isfinite1 = False
ok1 = (
val1.dtype.kind == "f"
and val1.dtype.itemsize >= 8
and isfinite1
or val1.size == 0
)
ok2 = (
val2 is None
or (
val2.dtype.kind == "f"
and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))
)
or val2.size == 0
)
if not (ok1 and ok2):
raise TypeError(
f"Input values for {self.name} class must be finite doubles"
)
if getattr(val1, "unit", None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
# seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances."
)
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1.0 / getattr(self, "unit", 1.0)
if factor != 1.0:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, "unit", None) is not None:
raise TypeError("Cannot mix float and Quantity inputs")
if val2 is None:
val2 = np.array(0, dtype=val1.dtype)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_SCALES}"
)
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2 in specified
``out_subfmt``.
This is the base method that ignores ``parent`` and uses the ``value``
property to compute the output. This is done by temporarily setting
``self.out_subfmt`` and calling ``self.value``. This is required for
legacy Format subclasses prior to astropy 4.0. New code should instead
implement the value functionality in ``to_value()`` and then make the
``value`` property be a simple call to ``self.to_value()``.
Parameters
----------
parent : object
Parent `~astropy.time.Time` object associated with this
`~astropy.time.TimeFormat` object
out_subfmt : str or None
Output subformat (use existing self.out_subfmt if `None`)
Returns
-------
value : numpy.array, numpy.ma.array
Array or masked array of formatted time representation values
"""
# Get value via ``value`` property, overriding out_subfmt temporarily if needed.
if out_subfmt is not None:
out_subfmt_orig = self.out_subfmt
try:
self.out_subfmt = out_subfmt
value = self.value
finally:
self.out_subfmt = out_subfmt_orig
else:
value = self.value
return self.mask_if_needed(value)
@property
def value(self):
raise NotImplementedError
@classmethod
def _select_subfmts(cls, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
If no subformat matches pattern then a ValueError is raised. A special
case is a format with no allowed subformats, i.e. subfmts=(), and
pattern='*'. This is OK and happens when this method is used for
validation of an out_subfmt.
"""
if not isinstance(pattern, str):
raise ValueError("subfmt attribute must be a string")
elif pattern == "*":
return cls.subfmts
subfmts = [x for x in cls.subfmts if fnmatch.fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
if len(cls.subfmts) == 0:
raise ValueError(f"subformat not allowed for format {cls.name}")
else:
subfmt_names = [x[0] for x in cls.subfmts]
raise ValueError(
f"subformat {pattern!r} must match one of "
f"{subfmt_names} for format {cls.name}"
)
return subfmts
class TimeNumeric(TimeFormat):
subfmts = (
("float", np.float64, None, np.add),
("long", np.longdouble, utils.longdouble_to_twoval, utils.twoval_to_longdouble),
("decimal", np.object_, utils.decimal_to_twoval, utils.twoval_to_decimal),
("str", np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
("bytes", np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
# Save original state of val2 because the super()._check_val_type below
# may change val2 from None to np.array(0). The value is saved in order
# to prevent a useless and slow call to np.result_type() below in the
# most common use-case of providing only val1.
orig_val2_is_none = val2 is None
if val1.dtype.kind == "f":
val1, val2 = super()._check_val_type(val1, val2)
elif not orig_val2_is_none or not (
val1.dtype.kind in "US"
or (
val1.dtype.kind == "O"
and all(isinstance(v, Decimal) for v in val1.flat)
)
):
raise TypeError(
f"for {self.name} class, input should be doubles, string, or Decimal, "
"and second values are only allowed for doubles."
)
val_dtype = (
val1.dtype if orig_val2_is_none else np.result_type(val1.dtype, val2.dtype)
)
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val_dtype, dtype):
break
else:
raise ValueError("input type not among selected sub-formats.")
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
f"for {self.name} class, input should be (long) doubles, string, "
"or Decimal, and second values are only allowed for "
"(long) doubles."
)
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in __init_subclass__?
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ("str", "bytes"):
unit = getattr(self, "unit", 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs["fmt"] = f".{digits}f"
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = "jd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = "mjd"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = "decimalyear"
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format="jd")
t_end = Time(jd1_end, jd2_end, scale=self.scale, format="jd")
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode("ascii")
iy_start, ims, ids, ihmsfs = erfa.d2dtf(
scale, 0, self.jd1, self.jd2_filled # precision=0
)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode("ascii")
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
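# Illustrative sketch of the decimalyear convention described above:
#
#     Time(2000.5, format='decimalyear', scale='utc').iso
#     # -> '2000-07-02 00:00:00.000'
#     Time('2000-01-01T00:00:00', scale='utc').decimalyear
#     # -> 2000.0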
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(
cls.epoch_val,
cls.epoch_val2,
scale=cls.epoch_scale,
format=cls.epoch_format,
)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For an TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1.0 / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
# is about 10x faster than `np.round(jd2)`` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(
Time(jd1, jd2, scale=self.epoch_scale, format="jd"), self.scale
)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale '{self.epoch_scale}' "
f"to specified scale '{self.scale}', got error:\n{err}"
) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError("cannot compute value without parent Time object")
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale "
f"'{self.epoch_scale}' to specified scale '{self.scale}', "
f"got error:\n{err}"
) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1.0 / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time (UTC): seconds from 1970-01-01 00:00:00 UTC, ignoring leap seconds.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
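    Example (an illustrative check of the value quoted above)::
        >>> from astropy.time import Time
        >>> t = Time('2000-01-01 00:00:00', scale='utc')
        >>> t.unix  # doctest: +FLOAT_CMP
        946684800.0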
"""
name = "unix"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1970-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
class TimeUnixTai(TimeUnix):
"""
Unix time (TAI): SI seconds elapsed since 1970-01-01 00:00:00 TAI (see caveats).
This will generally differ from standard (UTC) Unix time by the cumulative
integral number of leap seconds introduced into UTC since 1972-01-01 UTC
plus the initial offset of 10 seconds at that date.
This convention matches the definition of linux CLOCK_TAI
(https://www.cl.cam.ac.uk/~mgk25/posix-clocks.html),
and the Precision Time Protocol
(https://en.wikipedia.org/wiki/Precision_Time_Protocol), which
is also used by the White Rabbit protocol in High Energy Physics:
https://white-rabbit.web.cern.ch.
Caveats:
- Before 1972, fractional adjustments to UTC were made, so the difference
between ``unix`` and ``unix_tai`` time is no longer an integer.
- Because of the fractional adjustments, to be very precise, ``unix_tai``
is the number of seconds since ``1970-01-01 00:00:00 TAI`` or equivalently
``1969-12-31 23:59:51.999918 UTC``. The difference between TAI and UTC
at that epoch was 8.000082 sec.
- On the day of a positive leap second the difference between ``unix`` and
``unix_tai`` times increases linearly through the day by 1.0. See also the
documentation for the `~astropy.time.TimeUnix` class.
- Negative leap seconds are possible, though none have been needed to date.
Examples
--------
>>> # get the current offset between TAI and UTC
>>> from astropy.time import Time
>>> t = Time('2020-01-01', scale='utc')
>>> t.unix_tai - t.unix
37.0
>>> # Before 1972, the offset between TAI and UTC was not integer
>>> t = Time('1970-01-01', scale='utc')
>>> t.unix_tai - t.unix # doctest: +FLOAT_CMP
8.000082
>>> # Initial offset of 10 seconds in 1972
>>> t = Time('1972-01-01', scale='utc')
>>> t.unix_tai - t.unix
10.0
"""
name = "unix_tai"
epoch_val = "1970-01-01 00:00:00"
epoch_scale = "tai"
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
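    An illustrative check of that value::
        >>> from astropy.time import Time
        >>> t = Time('2000-01-01 00:00:00', scale='utc')
        >>> t.cxcsec  # doctest: +FLOAT_CMP
        63072064.184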
"""
name = "cxcsec"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1998-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "tt"
epoch_format = "iso"
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
-----
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
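    Examples
    --------
    An illustrative check of the value quoted above::
        >>> from astropy.time import Time
        >>> t = Time('2000-01-01 00:00:00', scale='utc')
        >>> t.gps  # doctest: +FLOAT_CMP
        630720013.0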
"""
name = "gps"
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = "1980-01-06 00:00:19"
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC.
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = "plot_date"
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "jd"
@lazyproperty
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
try:
# Matplotlib >= 3.3 has a get_epoch() function
from matplotlib.dates import get_epoch
except ImportError:
# If no get_epoch() then the epoch is '0001-01-01'
_epoch = self._epoch
else:
# Get the matplotlib date epoch as an ISOT string in UTC
epoch_utc = get_epoch()
from erfa import ErfaWarning
with warnings.catch_warnings():
# Catch possible dubious year warnings from erfa
warnings.filterwarnings("ignore", category=ErfaWarning)
_epoch = Time(epoch_utc, scale="utc", format="isot")
_epoch.format = "jd"
return _epoch
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points.
"""
name = "stardate"
unit = 0.397766856 # Stardate units per day
epoch_val = "2318-07-05 11:00:00" # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = "tai"
epoch_format = "iso"
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = "astropy_time"
def __new__(
cls, val1, val2, scale, precision, in_subfmt, out_subfmt, from_jd=False
):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (
isinstance(val1_0, Time)
and all(type(val) is type(val1_0) for val in val1.flat)
):
raise TypeError(
f"Input values for {cls.name} class must all be the same "
"astropy Time type."
)
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
# Collect individual location values and merge into a single location.
if any(tm.location is not None for tm in val1):
if any(tm.location is None for tm in val1):
raise ValueError(
"cannot concatenate times unless all locations "
"are set or no locations are set"
)
locations = []
for tm in val1:
location = np.broadcast_to(
tm.location, tm._time.jd1.shape, subok=True
)
locations.append(np.atleast_1d(location))
location = np.concatenate(locations)
else:
location = None
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
location = val1_0.location
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(
jd1, jd2, scale, precision, in_subfmt, out_subfmt, from_jd=True
)
# Make a temporary hidden attribute to transfer location back to the
# parent Time object where it needs to live.
self._location = location
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object.
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2."""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None, out_subfmt=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
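        Examples
        --------
        A minimal sketch of timezone-aware output, using the ``TimezoneInfo``
        helper defined later in this module::
            >>> from astropy.time import Time, TimezoneInfo
            >>> import astropy.units as u
            >>> t = Time('2000-01-01 12:00:00', scale='utc')
            >>> print(t.to_datetime(timezone=TimezoneInfo(utc_offset=1*u.hour)))
            2000-01-01 13:00:00+01:00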
"""
if out_subfmt is not None:
# Out_subfmt not allowed for this format, so raise the standard
# exception by trying to validate the value.
self._select_subfmts(out_subfmt)
if timezone is not None:
if self._scale != "utc":
raise ScaleValueError(
f"scale is {self._scale}, must be 'utc' when timezone is supplied."
)
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, 6, self.jd1, self.jd2_filled # 6 for microsec
)
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
iterator = np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=7 * [None] + [object],
)
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError(
f"Time {(iy, im, id, ihr, imin, isec, ifracsec)} is within "
"a leap second but datetime does not support leap seconds"
)
if timezone is not None:
out[...] = datetime.datetime(
iy, im, id, ihr, imin, isec, ifracsec, tzinfo=TimezoneInfo()
).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
    ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = "ymdhms"
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
        It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError("val2 must be None for ymdhms format")
ymdhms = ["year", "month", "day", "hour", "minute", "second"]
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (
val1.dtype.kind == "O"
and val1.shape == ()
and isinstance(val1.item(), dict)
):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {
name: value for name, value in zip(names, np.broadcast_arrays(*values))
}
else:
raise ValueError("input must be dict or table-like")
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[: len(names)]
def comma_repr(vals):
return ", ".join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(
f"{comma_repr(bad_names)} not allowed as YMDHMS key name(s)"
)
if set(names) != set(required_names):
raise ValueError(
f"for {len(names)} input key names "
f"you must supply {comma_repr(required_names)}"
)
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
val1["year"],
val1.get("month", 1),
val1.get("day", 1),
val1.get("hour", 0),
val1.get("minute", 0),
val1.get("second", 0),
)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9, self.jd1, self.jd2_filled)
out = np.empty(
self.jd1.shape,
dtype=[
("year", "i4"),
("month", "i4"),
("day", "i4"),
("hour", "i4"),
("minute", "i4"),
("second", "f8"),
],
)
out["year"] = iys
out["month"] = ims
out["day"] = ids
out["hour"] = ihmsfs["h"]
out["minute"] = ihmsfs["m"]
out["second"] = ihmsfs["s"] + ihmsfs["f"] * 10 ** (-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or None, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = "UTC"
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
**Fast C-based parser**
Time format classes can take advantage of a fast C-based parser if the times
are represented as fixed-format strings with year, month, day-of-month,
hour, minute, second, OR year, day-of-year, hour, minute, second. This can
be a factor of 20 or more faster than the pure Python parser.
Fixed format means that the components always have the same number of
characters. The Python parser will accept ``2001-9-2`` as a date, but the C
parser would require ``2001-09-02``.
A subclass in this case must define a class attribute ``fast_parser_pars``
which is a `dict` with all of the keys below. An inherited attribute is not
checked, only an attribute in the class ``__dict__``.
- ``delims`` (tuple of int): ASCII code for character at corresponding
``starts`` position (0 => no character)
- ``starts`` (tuple of int): position where component starts (including
      delimiter if present). Use -1 for the month component for formats that use
day of year.
- ``stops`` (tuple of int): position where component ends. Use -1 to
continue to end of string, or for the month component for formats that use
day of year.
- ``break_allowed`` (tuple of int): if true (1) then the time string can
legally end just before the corresponding component (e.g. "2000-01-01"
is a valid time but "2000-01-01 12" is not).
- ``has_day_of_year`` (int): 0 if dates have year, month, day; 1 if year,
day-of-year
"""
def __init_subclass__(cls, **kwargs):
if "fast_parser_pars" in cls.__dict__:
fpp = cls.fast_parser_pars
fpp = np.array(
list(
zip(
map(chr, fpp["delims"]),
fpp["starts"],
fpp["stops"],
fpp["break_allowed"],
)
),
_parse_times.dt_pars,
)
if cls.fast_parser_pars["has_day_of_year"]:
fpp["start"][1] = fpp["stop"][1] = -1
cls._fast_parser = _parse_times.create_parser(fpp)
super().__init_subclass__(**kwargs)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ("S", "U") and val1.size:
raise TypeError(f"Input values for {self.name} class must be strings")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ("year", "mon", "mday", "hour", "min", "sec")
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
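        # For example (illustrative), "2000-01-02 03:04:05.678" is split into
        # timestr = "2000-01-02 03:04:05" and fracsec = 0.678; the fraction is
        # added back onto the seconds value after the main parse below.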
try:
idot = timestr.rindex(".")
except Exception:
timestr_has_fractional_digits = False
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
timestr_has_fractional_digits = True
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
subfmt_has_sec = "%S" in strptime_fmt_or_regex
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, "tm_" + component) for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [
int(tm.get(component, default))
for component, default in zip(components, defaults)
]
subfmt_has_sec = "sec" in tm
# Add fractional seconds if they were in the original time string
# and the subformat has seconds. A time like "2022-08-01.123" will
# never pass this for a format like ISO and will raise a parsing
# exception.
if timestr_has_fractional_digits:
if subfmt_has_sec:
vals[-1] = vals[-1] + fracsec
else:
continue
return vals
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2."""
# If specific input subformat is required then use the Python parser.
# Also do this if Time format class does not define `use_fast_parser` or
# if the fast parser is entirely disabled. Note that `use_fast_parser`
# is ignored for format classes that don't have a fast parser.
if (
self.in_subfmt != "*"
or "_fast_parser" not in self.__class__.__dict__
or conf.use_fast_parser == "False"
):
jd1, jd2 = self.get_jds_python(val1, val2)
else:
try:
jd1, jd2 = self.get_jds_fast(val1, val2)
except Exception:
# Fall through to the Python parser unless fast is forced.
if conf.use_fast_parser == "force":
raise
else:
jd1, jd2 = self.get_jds_python(val1, val2)
self.jd1 = jd1
self.jd2 = jd2
def get_jds_python(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2."""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None, None, None, None, None, None],
flags=["zerosize_ok"],
op_dtypes=[None] + 5 * [np.intc] + [np.double],
)
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
(
iy[...],
im[...],
id[...],
ihr[...],
imin[...],
dsec[...],
) = self.parse_string(val, subfmts)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"), *iterator.operands[1:]
)
jd1, jd2 = day_frac(jd1, jd2)
return jd1, jd2
def get_jds_fast(self, val1, val2):
"""Use fast C parser to parse time strings in val1 and get jd1, jd2."""
        # Handle bytes or str input and convert to uint8. We need to use the
# dtype _parse_times.dt_u1 instead of uint8, since otherwise it is
# not possible to create a gufunc with structured dtype output.
# See note about ufunc type resolver in pyerfa/erfa/ufunc.c.templ.
if val1.dtype.kind == "U":
# Note: val1.astype('S') is *very* slow, so we check ourselves
# that the input is pure ASCII.
val1_uint32 = val1.view((np.uint32, val1.dtype.itemsize // 4))
if np.any(val1_uint32 > 127):
raise ValueError("input is not pure ASCII")
# It might be possible to avoid making a copy via astype with
# cleverness in parse_times.c but leave that for another day.
chars = val1_uint32.astype(_parse_times.dt_u1)
else:
chars = val1.view((_parse_times.dt_u1, val1.dtype.itemsize))
# Call the fast parsing ufunc.
time_struct = self._fast_parser(chars)
jd1, jd2 = erfa.dtf2d(
self.scale.upper().encode("ascii"),
time_struct["year"],
time_struct["month"],
time_struct["day"],
time_struct["hour"],
time_struct["minute"],
time_struct["second"],
)
return day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
        scale = self.scale.upper().encode("ascii")
iys, ims, ids, ihmsfs = erfa.d2dtf(
scale, self.precision, self.jd1, self.jd2_filled
)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
yday = None
has_yday = "{yday:" in str_fmt
ihrs = ihmsfs["h"]
imins = ihmsfs["m"]
isecs = ihmsfs["s"]
ifracs = ihmsfs["f"]
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs], flags=["zerosize_ok"]
):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {
"year": int(iy),
"mon": int(im),
"day": int(id),
"hour": int(ihr),
"min": int(imin),
"sec": int(isec),
"fracsec": int(ifracsec),
"yday": yday,
}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith("{sec:02d}"):
str_fmt += ".{fracsec:0" + str(self.precision) + "d}"
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "iso"
subfmts = (
(
"date_hms",
"%Y-%m-%d %H:%M:%S",
# XXX To Do - use strftime for output ??
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%d %H:%M",
"{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000-01-12 13:14:15.678"
# 01234567890123456789012
# yyyy-mm-dd hh:mm:ss.fff
# Parsed as ('yyyy', '-mm', '-dd', ' hh', ':mm', ':ss', '.fff')
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord(" "), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith("Z"):
if self.scale != "utc":
raise ValueError("Time input terminating in 'Z' must have scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "isot"
subfmts = (
(
"date_hms",
"%Y-%m-%dT%H:%M:%S",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date_hm",
"%Y-%m-%dT%H:%M",
"{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}",
),
("date", "%Y-%m-%d", "{year:d}-{mon:02d}-{day:02d}"),
)
# See TimeISO for explanation
fast_parser_pars = dict(
delims=(0, ord("-"), ord("-"), ord("T"), ord(":"), ord(":"), ord(".")),
starts=(0, 4, 7, 10, 13, 16, 19),
stops=(3, 6, 9, 12, 15, 18, -1),
# Break allowed *before*
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=0,
)
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = "yday"
subfmts = (
(
"date_hms",
"%Y:%j:%H:%M:%S",
"{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}",
),
("date_hm", "%Y:%j:%H:%M", "{year:d}:{yday:03d}:{hour:02d}:{min:02d}"),
("date", "%Y:%j", "{year:d}:{yday:03d}"),
)
# Define positions and starting delimiter for year, month, day, hour,
# minute, seconds components of an ISO time. This is used by the fast
# C-parser parse_ymdhms_times()
#
# "2000:123:13:14:15.678"
# 012345678901234567890
# yyyy:ddd:hh:mm:ss.fff
# Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
#
# delims: character at corresponding `starts` position (0 => no character)
# starts: position where component starts (including delimiter if present)
# stops: position where component ends (-1 => continue to end of string)
fast_parser_pars = dict(
delims=(0, 0, ord(":"), ord(":"), ord(":"), ord(":"), ord(".")),
starts=(0, -1, 4, 8, 11, 14, 17),
stops=(3, -1, 7, 10, 13, 16, -1),
# Break allowed before:
# y m d h m s f
break_allowed=(0, 0, 0, 1, 0, 1, 1),
has_day_of_year=1,
)
class TimeDatetime64(TimeISOT):
name = "datetime64"
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == "M":
if val1.size > 0:
raise TypeError(
f"Input values for {self.name} class must be datetime64 objects"
)
else:
val1 = np.array([], "datetime64[D]")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = "2000"
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ["datetime64[M]", "datetime64[Y]"]:
val1 = val1.astype("datetime64[D]")
val1 = val1.astype("S")
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype("datetime64")
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
    ISOT but can give signed five-digit year (mostly for negative years).
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = "fits"
subfmts = (
(
"date_hms",
(
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"date",
r"(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:04d}-{mon:02d}-{day:02d}",
),
(
"longdate_hms",
(
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T"
r"(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)"
),
"{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}",
),
(
"longdate",
r"(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)",
"{year:+06d}-{mon:02d}-{day:02d}",
),
)
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(
subfmt[0],
subfmt[1] + r"(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?",
subfmt[2],
)
for subfmt in subfmts
)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present."""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError(f"Time {timestr} does not match {self.name} format")
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm["scale"] is not None:
warnings.warn(
"FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning,
)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm["scale"].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(TIME_SCALES)}"
)
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError(
f"Input strings for {self.name} class must all "
"have consistent time scales."
)
return [
int(tm["year"]),
int(tm["mon"]),
int(tm["mday"]),
int(tm.get("hour", 0)),
int(tm.get("min", 0)),
float(tm.get("sec", 0.0)),
]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if "long" not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
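            # (Illustrative: JD 1721425.5 is 0001-01-01 00:00:00 and
            # JD 5373484.5 is 10000-01-01 00:00:00, i.e. the limits of the
            # range representable with unsigned 4-digit years.)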
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = "long" + self.out_subfmt
return super().value
class TimeEpochDate(TimeNumeric):
"""
    Base class to support floating point Besselian and Julian epoch dates.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0."""
name = "byear"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes."""
if hasattr(val1, "to") and hasattr(val1, "unit") and val1.unit is not None:
raise ValueError(
"Cannot use Quantities for 'byear' format, as the interpretation "
"would be ambiguous. Use float with Besselian year instead."
)
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0."""
name = "jyear"
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None], op_dtypes=[val1.dtype, np.double], flags=["zerosize_ok"]
)
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError(f"Time {val} does not match {self.name} format")
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + "%." + str(self.precision) + "f"
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'."""
name = "byear_str"
epoch_to_jd = "epb2jd"
jd_to_epoch = "epb"
epoch_prefix = "B"
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'."""
name = "jyear_str"
epoch_to_jd = "epj2jd"
jd_to_epoch = "epj"
epoch_prefix = "J"
class TimeDeltaFormat(TimeFormat):
"""Base class for time delta representations."""
_registry = TIME_DELTA_FORMATS
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`.
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError(
f"Scale value '{scale}' not in allowed values {TIME_DELTA_SCALES}"
)
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1.0 / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1.0 / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds."""
name = "sec"
unit = 1.0 / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)."""
name = "jd"
unit = 1.0
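    # Illustrative relation: TimeDelta(86400.0, format='sec').jd == 1.0, since
    # both delta formats share the same internal (jd1, jd2) day representation.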
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta."""
name = "datetime"
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError(
f"Input values for {self.name} class must be datetime.timedelta objects"
)
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer(
[val1, None, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, np.double, np.double],
)
day = datetime.timedelta(days=1)
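        # For example (illustrative), divmod(timedelta(days=2, hours=6), day)
        # returns (2, timedelta(hours=6)), giving jd1 = 2.0 and jd2 = 0.25
        # for that element.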
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2], iterator.operands[-1])
@property
def value(self):
iterator = np.nditer(
[self.jd1, self.jd2, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, None, object],
)
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_, microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float_)
if isinstance(jd, np.generic) and (
jd.dtype.kind == "f" and jd.dtype.itemsize <= 8 or jd.dtype.kind in "iu"
):
return np.array(jd, dtype=np.float_)
elif isinstance(jd, np.ndarray) and jd.dtype.kind == "f" and jd.dtype.itemsize == 8:
return jd
else:
raise TypeError(
"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}"
)
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"])
return s_jd1, s_jd2
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports format.py just before core.py.
from .core import TIME_DELTA_SCALES, TIME_SCALES, ScaleValueError, Time # noqa: E402
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions.
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import (
is_effectively_unity,
resolve_fractions,
sanitize_scale,
validate_power,
)
__all__ = [
"UnitsError",
"UnitsWarning",
"UnitConversionError",
"UnitTypeError",
"UnitBase",
"NamedUnit",
"IrreducibleUnit",
"Unit",
"CompositeUnit",
"PrefixUnit",
"UnrecognizedUnit",
"def_unit",
"get_current_unit_registry",
"set_enabled_units",
"add_enabled_units",
"set_enabled_equivalencies",
"add_enabled_equivalencies",
"set_enabled_aliases",
"add_enabled_aliases",
"dimensionless_unscaled",
"one",
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
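    Examples
    --------
    An illustrative sketch: a bare 2-tuple gains identity converter functions::
        >>> from astropy import units as u
        >>> funit, tunit, a, b = _normalize_equivalencies([(u.m, u.km)])[0]
        >>> funit, tunit, a(3), b(3)
        (Unit("m"), Unit("km"), 3, 3)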
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
funit is Unit(funit)
and (tunit is None or tunit is Unit(tunit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
# If passed another registry we don't need to rebuild everything.
# but because these are mutable types we don't want to create
# conflicts so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {
k: v.copy() for k, v in init._by_physical_type.items()
}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if st in self._registry and unit != self._registry[st]:
raise ValueError(
f"Object with name {st!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them."
)
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}."
)
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}."
)
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
        >>> import numpy as np
        >>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, set the aliases requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
instances were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
_type_id = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self).encode("unicode_escape")
def __str__(self):
"""Return string representation for unit."""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
type of this unit. It is comprised of the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
if self._type_id is None:
unit = self.decompose()
self._type_id = tuple(zip((base.name for base in unit.bases), unit.powers))
return self._type_id
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. Perhaps you meant to_string()?"
)
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. Perhaps you meant to_string()?"
)
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
@staticmethod
def _normalize_equivalencies(equivalencies):
"""Normalizes equivalencies, ensuring each is a 4-tuple.
The resulting tuple is of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
        `set_enabled_equivalencies`, except when ``equivalencies`` is `None`,
in which case the returned list is always empty.
Raises
------
ValueError if an equivalency cannot be interpreted
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self ** (-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, "unit"):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(
">> is not implemented. Did you mean to convert "
f"to a Quantity with unit {m} using '<<'?",
AstropyWarning,
)
return NotImplemented
def __hash__(self):
if self._hash is None:
parts = (
[str(self.scale)]
+ [x.name for x in self.bases]
+ [str(x) for x in self.powers]
)
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop("_hash", None)
state.pop("_type_id", None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1.0 or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1.0 or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.0
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies) for u in other)
other = Unit(other, parse_strict="silent")
return self._is_equivalent(other, equivalencies)
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if self._get_physical_type_id() == other._get_physical_type_id():
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other / unit).decompose([a])
return True
except Exception:
pass
elif (a._is_equivalent(unit) and b._is_equivalent(other)) or (
b._is_equivalent(unit) and a._is_equivalent(other)
):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
ratio = other.decompose() / unit.decompose()
try:
ratio_in_funit = ratio.decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.0)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string("unscaled")
physical_type = unit.physical_type
if physical_type != "unknown":
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.0:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies)
)
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, "equivalencies"):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
converter = self._get_converter(tunit, equivalencies)
except Exception:
pass
else:
return lambda v: b(converter(v))
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if self_decomposed.powers == other_decomposed.powers and all(
self_base is other_base
for (self_base, other_base) in zip(
self_decomposed.bases, other_decomposed.bases
)
):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other), equivalencies)(value)
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it's not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(
self, equivalencies=[], namespace=[], max_depth=2, depth=0, cached_results=None
):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
return all(base in namespace for base in unit.bases)
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit**power
tunit_decomposed = tunit_decomposed**power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append((len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth,
depth=depth + 1,
cached_results=cached_results,
)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append((len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units"
)
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(
self, equivalencies=[], units=None, max_depth=2, include_prefix_units=None
):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (
isinstance(tunit, UnitBase)
and (include_prefix_units or not isinstance(tunit, PrefixUnit))
and has_bases_in_common_with_equiv(decomposed, tunit.decompose())
):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(
self._compose(
equivalencies=equivalencies,
namespace=units,
max_depth=max_depth,
depth=0,
cached_results={},
)
)
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
            # If compose.bases has no elements, return np.inf as the score;
            # the exact value does not really matter. This happens, for
            # instance, for dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ("Primary name", "Unit definition", "Aliases")
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = "There are no equivalent units"
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = lines[0:1] + ["["] + [f"{x} ," for x in lines[1:]] + ["]"]
return "\n".join(lines)
def _repr_html_(self):
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES)
)
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
Extract attributes, and sort, the equivalent units pre-formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = "irreducible"
processed_equiv_units.append((u.name, irred, ", ".join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(
self, equivalencies=[], units=None, include_prefix_units=False
):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies,
units=units,
max_depth=1,
include_prefix_units=include_prefix_units,
)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
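# Illustrative sketch (not part of the original module): a few of the
# UnitBase operations defined above, exercised through the public
# ``astropy.units`` namespace.
def _example_unitbase_operations():
    from astropy import units as u

    # Arithmetic composes units; ``to`` with no value returns the scale.
    scale = (u.km / u.s).to(u.m / u.s)  # 1000.0

    # ``is_equivalent`` checks dimensional compatibility, optionally through
    # equivalencies such as ``u.spectral()``.
    assert u.m.is_equivalent(u.pc)
    assert not u.m.is_equivalent(u.s)
    assert u.nm.is_equivalent(u.Hz, equivalencies=u.spectral())

    # ``decompose`` reduces to irreducible units; ``compose`` searches for
    # simpler combinations of known units (the result here includes u.W).
    assert (u.J / u.s).decompose() == u.kg * u.m**2 / u.s**3
    candidates = (u.kg * u.m**2 / u.s**3).compose()
    return scale, candidates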
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError("st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return f"{names[1]} ({names[0]})"
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
f"Object with name {name!r} already exists in "
f"given namespace ({namespace[name]!r})."
)
for name in self._names:
namespace[name] = self
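# Illustrative sketch (not part of the original module): how the name /
# short-name / long-name bookkeeping of NamedUnit shows up on a unit created
# with ``def_unit`` (defined later in this module). The unit itself is made
# up for the example.
def _example_named_unit_names():
    from astropy import units as u

    bd = u.def_unit(["bd", "bakers_dozen"], 13 * u.one)
    assert bd.name == "bd"
    assert bd.aliases == ["bakers_dozen"]
    assert bd.short_names == ["bd"]
    assert bd.long_names == ["bakers_dozen"]
    return bd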
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (
_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__(),
)
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1], _error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases"
)
return self
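# Illustrative sketch (not part of the original module): an irreducible unit
# decomposes to itself unless specific ``bases`` are requested, in which case
# it is rescaled onto an equivalent base or a UnitConversionError is raised.
def _example_irreducible_decompose():
    from astropy import units as u

    assert u.m.decompose() is u.m
    in_km = u.m.decompose(bases=[u.km])  # CompositeUnit(0.001, [km], [1])
    assert abs(in_km.scale - 0.001) < 1e-15
    try:
        u.m.decompose(bases=[u.s])
    except u.UnitConversionError:
        pass
    return in_km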
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode("ascii", "replace")
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
f"The unit {self.name!r} is unrecognized, so all arithmetic operations "
"with it are invalid."
)
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = _unrecognized_operator
__lt__ = __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
f"The unit {self.name!r} is unrecognized. It can not be converted "
"to other units."
)
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
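# Illustrative sketch (not part of the original module): with
# ``parse_strict='silent'`` an unparseable string becomes an
# UnrecognizedUnit, which round-trips as a string but rejects arithmetic
# and conversion.
def _example_unrecognized_unit():
    from astropy import units as u

    foo = u.Unit("FOO", parse_strict="silent")
    assert isinstance(foo, u.UnrecognizedUnit)
    assert str(foo) == "FOO"
    try:
        foo * u.s
    except ValueError:
        pass
    return foo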
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(
self,
s="",
represents=None,
format=None,
namespace=None,
doc=None,
parse_strict="raise",
):
# Short-circuit if we're already a unit
if hasattr(s, "_get_physical_type_id"):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(
represents.value * represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False,
)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(
s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False,
)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc
)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode("ascii")
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == "silent":
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + " "
else:
format_clause = ""
msg = (
f"'{s}' did not parse as {format_clause}unit: {str(e)} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
)
if parse_strict == "raise":
raise ValueError(msg)
elif parse_strict == "warn":
warnings.warn(msg, UnitsWarning)
else:
raise ValueError(
"'parse_strict' must be 'warn', 'raise' or 'silent'"
)
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but each
    always returns a `UnitBase` instance. If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='silent')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
    - The last form, which creates a new `Unit`, is described in detail
below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None, format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers, _error_check=False)
return unit
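# Illustrative sketch (not part of the original module): a named unit defined
# in terms of another unit keeps that definition in ``represents`` and
# converts through it. The name "myJy" is purely hypothetical.
def _example_named_derived_unit():
    from astropy import units as u

    myJy = u.def_unit("myJy", 1e-26 * u.W / (u.m**2 * u.Hz))
    assert myJy.is_equivalent(u.Jy)
    scale = myJy.to(u.Jy)  # ~1.0, since Jy has the same definition
    return scale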
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(
self,
scale,
bases,
powers,
decompose=False,
decompose_bases=set(),
_error_check=True,
):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError("bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale**power
self._bases = unit.bases
self._powers = [
operator.mul(*resolve_fractions(p, power)) for p in unit.powers
]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose, bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f"Unit(dimensionless with a scale of {self._scale})"
else:
return "Unit(dimensionless)"
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale**p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], "name", "")))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if not isinstance(base, IrreducibleUnit) or (
len(bases) and base not in bases
):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(
self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases
)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
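# Illustrative sketch (not part of the original module): unit arithmetic
# produces CompositeUnit instances whose identical bases are gathered and
# whose scale is folded into a single factor by _expand_and_gather.
def _example_composite_unit_gathering():
    from astropy import units as u

    gathered = u.km * u.m / u.km  # km cancels out
    assert isinstance(gathered, u.CompositeUnit)
    assert gathered.bases == [u.m] and gathered.powers == [1]

    decomposed = (u.km / u.s).decompose()
    assert decomposed.scale == 1000.0
    assert decomposed.bases == [u.m, u.s] and decomposed.powers == [1, -1]
    return decomposed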
si_prefixes = [
(["Q"], ["quetta"], 1e30),
(["R"], ["ronna"], 1e27),
(["Y"], ["yotta"], 1e24),
(["Z"], ["zetta"], 1e21),
(["E"], ["exa"], 1e18),
(["P"], ["peta"], 1e15),
(["T"], ["tera"], 1e12),
(["G"], ["giga"], 1e9),
(["M"], ["mega"], 1e6),
(["k"], ["kilo"], 1e3),
(["h"], ["hecto"], 1e2),
(["da"], ["deka", "deca"], 1e1),
(["d"], ["deci"], 1e-1),
(["c"], ["centi"], 1e-2),
(["m"], ["milli"], 1e-3),
(["u"], ["micro"], 1e-6),
(["n"], ["nano"], 1e-9),
(["p"], ["pico"], 1e-12),
(["f"], ["femto"], 1e-15),
(["a"], ["atto"], 1e-18),
(["z"], ["zepto"], 1e-21),
(["y"], ["yocto"], 1e-24),
(["r"], ["ronto"], 1e-27),
(["q"], ["quecto"], 1e-30),
]
binary_prefixes = [
(["Ki"], ["kibi"], 2**10),
(["Mi"], ["mebi"], 2**20),
(["Gi"], ["gibi"], 2**30),
(["Ti"], ["tebi"], 2**40),
(["Pi"], ["pebi"], 2**50),
(["Ei"], ["exbi"], 2**60),
]
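# Illustrative sketch (not part of the original module): the prefix tables
# above are what ``def_unit(..., prefixes=True)`` feeds into _add_prefixes to
# create PrefixUnit instances such as ``km`` and ``cm``.
def _example_prefixed_units():
    from astropy import units as u

    assert isinstance(u.km, u.PrefixUnit)
    assert u.km.represents.scale == 1000.0
    assert u.cm.represents.scale == 0.01
    return u.km.represents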
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == "u":
format["latex"] = r"\mu " + u.get_format_name("latex")
format["unicode"] = "\N{MICRO SIGN}" + u.get_format_name("unicode")
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(
names,
CompositeUnit(factor, [u], [1], _error_check=False),
namespace=namespace,
format=format,
)
def def_unit(
s,
represents=None,
doc=None,
format=None,
prefixes=False,
exclude_prefixes=[],
namespace=None,
):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc, format=format)
else:
result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(
result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes
)
return result
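# Illustrative sketch (not part of the original module): defining a new unit
# in a private namespace with automatically generated prefixed versions. The
# unit name "pitch" and its definition are made up for the example.
def _example_def_unit_with_prefixes():
    from astropy import units as u

    ns = {}
    pitch = u.def_unit("pitch", 10 * u.micron, namespace=ns, prefixes=True)
    assert ns["pitch"] is pitch
    assert "mpitch" in ns and "kpitch" in ns  # prefixed versions
    assert abs((1 * ns["kpitch"]).to_value(u.mm) - 10.0) < 1e-9
    return pitch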
def _condition_arg(value):
"""
Validate value is acceptable for conversion purposes.
    Will convert the value into an array if it is not a scalar but can be
    converted into one.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ["i", "f", "c"]:
raise ValueError(
"Value not scalar compatible or convertible to "
"an int, float, or complex array"
)
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1.0 * _condition_arg(val)
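# Illustrative sketch (not part of the original module): scalars pass through
# _condition_arg unchanged, sequences become ndarrays, and non-numeric input
# is rejected.
def _example_condition_arg():
    import numpy as np

    assert _condition_arg(3.5) == 3.5
    as_array = _condition_arg([1, 2, 3])
    assert isinstance(as_array, np.ndarray)
    try:
        _condition_arg("not a number")
    except ValueError:
        pass
    return as_array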
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
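# Illustrative sketch (not part of the original module): the configuration
# item above can be changed temporarily through the standard ConfigNamespace
# API.
def _example_latex_threshold_config():
    with conf.set_temp("latex_array_threshold", -1):
        # Inside this block the LaTeX repr follows numpy's print options.
        threshold = conf.latex_array_threshold  # -1
    return threshold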
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
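# Illustrative sketch (not part of the original module): ``Quantity.flat``
# returns the iterator defined above; elements come back as Quantity scalars
# and values assigned through it are converted to the parent unit.
def _example_quantity_flat_iteration():
    import numpy as np

    from astropy import units as u

    q = np.arange(6.0).reshape(2, 3) * u.m
    first = q.flat[0]  # still a Quantity
    assert first.unit == u.m
    q.flat[1] = 200 * u.cm  # stored as 2.0 m
    assert np.isclose(q[0, 1].value, 2.0)
    return q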
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
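# Illustrative sketch (not part of the original module): QuantityInfo is what
# lets a Quantity act as a mixin column inside an astropy Table, carrying its
# name and unit as metadata.
def _example_quantity_in_table():
    from astropy import units as u
    from astropy.table import Table

    t = Table()
    t["speed"] = [1.0, 2.0] * u.km / u.s
    assert t["speed"].unit == u.km / u.s
    assert t["speed"].info.name == "speed"
    return t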
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
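    Examples
    --------
    A few minimal, illustrative constructions (the values are arbitrary):
    >>> import numpy as np
    >>> import astropy.units as u
    >>> q1 = u.Quantity(15.1, unit=u.m)   # explicit value and unit
    >>> q2 = np.arange(3.0) * u.s         # multiply an array by a unit
    >>> q3 = u.Quantity("3 km")           # parse a string containing a unit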
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
        As a classmethod, the type is the class, i.e., ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
            Unit specification; can also be the physical type (i.e., str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
        # Allow this to sort of work for Python 3.8- / no typing_extensions;
        # instead of bailing out, return the unit for `quantity_input`.
if not HAS_ANNOTATED:
warnings.warn(
"Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed."
)
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
                # the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
            # Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
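        Examples
        --------
        Illustrative only; the ufunc machinery converts units as needed:
        >>> import numpy as np
        >>> import astropy.units as u
        >>> float(np.add(1.0 * u.m, 50.0 * u.cm).value)
        1.5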
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
            # Avoid looping back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result if unit is None else self._new_view(result, unit, finalize=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, finalize=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
finalize : bool, optional
Whether to call ``__array_finalize__`` to transfer properties from
``self`` to the new view of ``obj`` (e.g., ``info`` for all
subclasses, or ``_wrap_angle`` for `~astropy.coordinates.Latitude`).
Default: `True`, as appropriate for, e.g., unit conversions or slicing,
where the nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # twice as long as the check ``obj.__class__ is np.ndarray``, so it is
        # not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
if finalize:
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
            # Standard path; let the unit do the work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
            (none for `~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See Also
--------
to_value : get the numerical value in a given unit.
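        Examples
        --------
        A minimal illustration (values are arbitrary):
        >>> import astropy.units as u
        >>> float((1.0 * u.km).to(u.m).value)
        1000.0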
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See Also
--------
to : Get a new instance in a different unit.
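        Examples
        --------
        A minimal illustration (values are arbitrary):
        >>> import astropy.units as u
        >>> float((1.0 * u.km).to_value(u.m))
        1000.0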
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars in to numpy scalars.
return value if value.shape else value[()]
value = property(
to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
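    # For example (illustrative, assuming ``import astropy.units as u``):
    #     (1.0 * u.km).si   # -> 1000.0 m
    #     (1.0 * u.J).cgs   # -> 1.0e7 erg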
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
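        For example (illustrative):
        >>> import numpy as np
        >>> import astropy.units as u
        >>> (np.array(1.0) * u.m).isscalar
        True
        >>> np.isscalar(np.array(1.0))
        False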
"""
return not self.shape
# This flag controls whether convenience conversion members, such
    # as `q.m`, equivalent to `q.to_value(u.m)`, are available.  This is
    # not turned on for Quantity itself, but is enabled on some subclasses
    # of Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
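    # For example (illustrative): ``(1.0 * u.m) == (100.0 * u.cm)`` gives
    # True, while comparing with an incompatible unit, e.g.
    # ``(1.0 * u.m) == (1.0 * u.s)``, gives False rather than raising.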
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
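    # Example of ``<<`` and ``<<=`` (illustrative, assuming
    # ``import numpy as np`` and ``import astropy.units as u``):
    #     a = np.arange(3.0)
    #     q = a << u.m    # attach a unit; the data are not copied
    #     q <<= u.cm      # convert in place; ``a`` is rescaled as well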
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1.0 / self.value, other / self.unit, finalize=False)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, finalize=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], finalize=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
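    # For example (illustrative): ``f"{15.1 * u.m:.2f}"`` gives '15.10 m',
    # while ``f"{15.1 * u.m:latex}"`` renders the quantity via
    # ``to_string(format='latex')``.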
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
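        Examples
        --------
        Illustrative only (values are arbitrary):
        >>> import astropy.units as u
        >>> q = (3.0 * u.km / u.s).decompose()
        >>> print(q.unit)
        m / s
        >>> float(q.value)
        3000.0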
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
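    # Note the squared unit of the variance: e.g. ``([1.0, 2.0] * u.m).var()``
    # is a Quantity in m**2, whereas ``.std()`` stays in m (illustrative).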
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`.
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
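    For instance, a minimal (hypothetical) subclass restricted to lengths
    could look like::
        import astropy.units as u
        from astropy.units import SpecificTypeQuantity
        class Length(SpecificTypeQuantity):
            _equivalent_unit = u.m
        Length(5 * u.km)   # accepted; km is equivalent to m
        Length(5 * u.s)    # raises UnitTypeError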
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
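    Examples
    --------
    An illustrative sketch, assuming the function is re-exported at the
    ``astropy.units`` level as in recent astropy versions (results shown as
    comments, since the exact scalar/array repr depends on the numpy version)::
        from astropy import units as u
        u.isclose(100.001 * u.cm, 1 * u.m, atol=1 * u.mm)  # -> True
        u.isclose([1, 2] * u.m, [100, 205] * u.cm)         # -> [True, False]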
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
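    Examples
    --------
    An illustrative sketch, assuming the function is re-exported at the
    ``astropy.units`` level as in recent astropy versions (result shown as a
    comment)::
        from astropy import units as u
        u.allclose([1, 2] * u.m, [100, 200] * u.cm)  # -> True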
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
92e9d215caa8036a19343f7d17e7f3e62925d125cc12f696241def17c6f435d6
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines custom errors and exceptions used in astropy.coordinates.
"""
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"RangeError",
"BoundsError",
"IllegalHourError",
"IllegalMinuteError",
"IllegalSecondError",
"ConvertError",
"IllegalHourWarning",
"IllegalMinuteWarning",
"IllegalSecondWarning",
"UnknownSiteException",
]
class RangeError(ValueError):
"""
Raised when some part of an angle is out of its valid range.
"""
class BoundsError(RangeError):
"""
Raised when an angle is outside of its user-specified bounds.
"""
class IllegalHourError(RangeError):
"""
Raised when an hour value is not in the range [0,24).
Parameters
----------
hour : int, float
Examples
--------
.. code-block:: python
if not 0 <= hr < 24:
raise IllegalHourError(hour)
"""
def __init__(self, hour):
self.hour = hour
def __str__(self):
return (
f"An invalid value for 'hours' was found ('{self.hour}'); must be in the"
" range [0,24)."
)
class IllegalHourWarning(AstropyWarning):
"""
Raised when an hour value is 24.
Parameters
----------
hour : int, float
"""
def __init__(self, hour, alternativeactionstr=None):
self.hour = hour
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'hour' was found to be '{self.hour}', which is not in range (-24, 24)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalMinuteError(RangeError):
"""
    Raised when a minute value is not in the range [0,60).
Parameters
----------
minute : int, float
Examples
--------
.. code-block:: python
if not 0 <= min < 60:
raise IllegalMinuteError(minute)
"""
def __init__(self, minute):
self.minute = minute
def __str__(self):
return (
f"An invalid value for 'minute' was found ('{self.minute}'); should be in"
" the range [0,60)."
)
class IllegalMinuteWarning(AstropyWarning):
"""
Raised when a minute value is 60.
Parameters
----------
minute : int, float
"""
def __init__(self, minute, alternativeactionstr=None):
self.minute = minute
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'minute' was found to be '{self.minute}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
class IllegalSecondError(RangeError):
"""
    Raised when a second value (time) is not in the range [0,60).
Parameters
----------
second : int, float
Examples
--------
.. code-block:: python
if not 0 <= sec < 60:
raise IllegalSecondError(second)
"""
def __init__(self, second):
self.second = second
def __str__(self):
return (
f"An invalid value for 'second' was found ('{self.second}'); should be in"
" the range [0,60)."
)
class IllegalSecondWarning(AstropyWarning):
"""
Raised when a second value is 60.
Parameters
----------
second : int, float
"""
def __init__(self, second, alternativeactionstr=None):
self.second = second
self.alternativeactionstr = alternativeactionstr
def __str__(self):
message = (
f"'second' was found to be '{self.second}', which is not in range [0,60)."
)
if self.alternativeactionstr is not None:
message += " " + self.alternativeactionstr
return message
# TODO: consider if this should be moved to `units`?
class UnitsError(ValueError):
"""
Raised if units are missing or invalid.
"""
class ConvertError(Exception):
"""
Raised if a coordinate system cannot be converted to another.
"""
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = (
f"Site '{site}' not in database. Use {attribute} to see available sites."
)
if close_names:
message += " Did you mean one of: '{}'?'".format("', '".join(close_names))
self.site = site
self.attribute = attribute
self.close_names = close_names
        super().__init__(message)
335c64d7a6b23e3d96fd3b0b0a1383812edc0e1fb2006cf848e8c4f89c445585
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager, suppress
from inspect import signature
from warnings import warn
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"TransformGraph",
"CoordinateTransform",
"FunctionTransform",
"BaseAffineTransform",
"AffineTransform",
"StaticMatrixTransform",
"DynamicMatrixTransform",
"FunctionTransformWithFiniteDifference",
"CompositeTransform",
]
def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result
def frame_comps_from_set(frame_set):
"""
    A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result
class TransformGraph:
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache() # generates cache entries
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, "name", None)
if nm is not None:
if not isinstance(nm, list):
nm = [nm]
for name in nm:
dct[name] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this TransformGraph.
"""
if self._cached_frame_set is None:
self._cached_frame_set = set()
for a in self._graph:
self._cached_frame_set.add(a)
for b in self._graph[a]:
self._cached_frame_set.add(b)
return self._cached_frame_set.copy()
@property
def frame_attributes(self):
"""
A `dict` of all the attributes of all frame classes in this TransformGraph.
"""
if self._cached_frame_attributes is None:
self._cached_frame_attributes = frame_attrs_from_set(self.frame_set)
return self._cached_frame_attributes
@property
def frame_component_names(self):
"""
        A `set` of all component names ever defined within any frame class in
this TransformGraph.
"""
if self._cached_component_names is None:
self._cached_component_names = frame_comps_from_set(self.frame_set)
return self._cached_component_names
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified inplace.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._cached_frame_attributes = None
self._cached_component_names = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : `~astropy.coordinates.CoordinateTransform`
The transformation object. Typically a
`~astropy.coordinates.CoordinateTransform` object, although it may
be some other callable that is called with the same signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
if not callable(transform):
raise TypeError("transform must be callable")
frame_set = self.frame_set.copy()
frame_set.add(fromsys)
frame_set.add(tosys)
# Now we check to see if any attributes on the proposed frames override
# *any* component names, which we can't allow for some of the logic in
# the SkyCoord initializer to work
attrs = set(frame_attrs_from_set(frame_set).keys())
comps = frame_comps_from_set(frame_set)
invalid_attrs = attrs.intersection(comps)
if invalid_attrs:
invalid_frames = set()
for attr in invalid_attrs:
if attr in fromsys.frame_attributes:
invalid_frames.update([fromsys])
if attr in tosys.frame_attributes:
invalid_frames.update([tosys])
raise ValueError(
f"Frame(s) {list(invalid_frames)} contain invalid attribute names:"
f" {invalid_attrs}\nFrame attributes can not conflict with *any* of"
" the frame data component names (see"
" `frame_transform_graph.frame_component_names`)."
)
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or None
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or None
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or None
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError("fromsys and tosys must both be None if either are")
if transform is None:
raise ValueError("cannot give all Nones to remove_transform")
# search for the requested transform by brute force and remove it
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
if agraph[b] is transform:
del agraph[b]
fromsys = a
break
# If the transform was found, need to break out of the outer for loop too
if fromsys:
break
else:
raise ValueError(f"Could not find transform {transform} in the graph")
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError(
f"Current transform from {fromsys} to {tosys} is not"
f" {transform}"
)
# Remove the subgraph if it is now empty
if self._graph[fromsys] == {}:
self._graph.pop(fromsys)
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of class or None
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : float or int
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
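        Examples
        --------
        An illustrative sketch using the built-in graph (the exact path and
        distance depend on the transforms registered in your astropy version)::
            from astropy.coordinates import FK5, ICRS, frame_transform_graph
            path, distance = frame_transform_graph.find_shortest_path(ICRS, FK5)
            # ``path`` starts with ICRS and ends with FK5; ``distance`` is the
            # summed priority along that path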
"""
inf = float("inf")
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, "priority") else 1)
# otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(getattr(agraph[b], "priority", 1))
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
                    if n2 not in result:  # i.e., not yet visited
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError(
"n2 not in heap - this should be impossible!"
)
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""Generates and returns the CompositeTransform for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `~astropy.coordinates.CompositeTransform` or None
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
A `~astropy.coordinates.CompositeTransform` is always returned, because
`~astropy.coordinates.CompositeTransform` is slightly more adaptable in
the way it can be called than other transform classes. Specifically, it
takes care of intermediate steps of transformations in a way that is
consistent with 1-hop transformations.
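        Examples
        --------
        A short sketch with the built-in graph (illustrative only)::
            import astropy.units as u
            from astropy.coordinates import Galactic, ICRS, frame_transform_graph
            trans = frame_transform_graph.get_transform(ICRS, Galactic)
            galactic_coord = trans(ICRS(ra=10 * u.deg, dec=20 * u.deg), Galactic())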
"""
if not inspect.isclass(fromsys):
raise TypeError("fromsys is not a class")
if not inspect.isclass(tosys):
raise TypeError("tosys is not a class")
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(
transforms, fromsys, tosys, register_graph=False
)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
`BaseCoordinateFrame` subclass
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(self._cached_names.keys())
def to_dot_graph(
self,
priorities=True,
addnodes=[],
savefn=None,
savelayout="plain",
saveformat=None,
color_edges=True,
):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
            they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : None or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
color_edges : bool
Color the edges between two nodes (frames) based on the type of
transform. ``FunctionTransform``: red, ``StaticMatrixTransform``:
blue, ``DynamicMatrixTransform``: green.
Returns
-------
dotgraph : str
A string with the DOT format graph.
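        Examples
        --------
        An illustrative sketch (saving through graphviz requires the ``dot``
        executable to be installed and on your path)::
            from astropy.coordinates import frame_transform_graph
            dot_source = frame_transform_graph.to_dot_graph()
            frame_transform_graph.to_dot_graph(
                savefn="transforms.png", savelayout="dot", saveformat="png"
            )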
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = {
f: [k for k, v in self._cached_names.items() if v == f]
for f in self.frame_set
}
for n in nodes:
if n in invclsaliases:
aliases = "`\\n`".join(invclsaliases[n])
nodenames.append(
'{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, aliases)
)
else:
nodenames.append(n.__name__ + "[ shape=oval ]")
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__] if color_edges else "black"
edgenames.append((a.__name__, b.__name__, pri, color))
# generate simple dot format graph
lines = ["digraph AstropyCoordinateTransformGraph {"]
lines.append("graph [rankdir=LR]")
lines.append("; ".join(nodenames) + ";")
for enm1, enm2, weights, color in edgenames:
labelstr_fmt = "[ {0} {1} ]"
if priorities:
priority_part = f'label = "{weights}"'
else:
priority_part = ""
color_part = f'color = "{color}"'
labelstr = labelstr_fmt.format(priority_part, color_part)
lines.append(f"{enm1} -> {enm2}{labelstr};")
lines.append("")
lines.append("overlap=false")
lines.append("}")
dotgraph = "\n".join(lines)
if savefn is not None:
if savelayout == "plain":
with open(savefn, "w") as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append("-T" + saveformat)
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    # text mode so the DOT source (a str) can be piped in and
                    # the str stdout/stderr can be used directly below
                    encoding="utf-8",
                )
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise OSError("problem running graphviz: \n" + stderr)
with open(savefn, "w") as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <https://networkx.github.io/>`_
package installed for this to work.
Returns
-------
nxgraph : ``networkx.Graph``
This `~astropy.coordinates.TransformGraph` as a
`networkx.Graph <https://networkx.github.io/documentation/stable/reference/classes/graph.html>`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
transform = agraph[b]
pri = transform.priority if hasattr(transform, "priority") else 1
color = trans_to_color[transform.__class__]
nxgraph.add_edge(a, b, weight=pri, color=color)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1, **kwargs):
"""A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
            The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Additional keyword arguments are passed into the ``transcls``
constructor.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third are
``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use
`~astropy.coordinates.TransformGraph.add_transform` instead of this
decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(
func, fromsys, tosys, priority=priority, register_graph=self, **kwargs
)
return func
return deco
def _add_merged_transform(self, fromsys, tosys, *furthersys, priority=1):
"""
Add a single-step transform that encapsulates a multi-step transformation path,
using the transforms that already exist in the graph.
The created transform internally calls the existing transforms. If all of the
transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
This method is primarily useful for defining loopback transformations
(i.e., where ``fromsys`` and the final ``tosys`` are the same).
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform to.
*furthersys : class
Additional coordinate frame classes to transform to in order.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Notes
-----
Even though the created transform is a single step in the graph, it
will still internally call the constituent transforms. Thus, there is
no performance benefit for using this created transform.
For Astropy's built-in frames, loopback transformations typically use
`~astropy.coordinates.ICRS` to be safe. Transforming through an inertial
frame ensures that changes in observation time and observer
location/velocity are properly accounted for.
An error will be raised if a direct transform between ``fromsys`` and
        ``tosys`` already exists.
"""
frames = [fromsys, tosys, *furthersys]
lastsys = frames[-1]
full_path = self.get_transform(fromsys, lastsys)
transforms = [
self.get_transform(frame_a, frame_b)
for frame_a, frame_b in zip(frames[:-1], frames[1:])
]
if None in transforms:
raise ValueError("This transformation path is not possible")
if len(full_path.transforms) == 1:
raise ValueError(
f"A direct transform for {fromsys.__name__}->{lastsys.__name__} already"
" exists"
)
self.add_transform(
fromsys,
lastsys,
CompositeTransform(
transforms, fromsys, lastsys, priority=priority
)._as_single_transform(),
)
@contextmanager
def impose_finite_difference_dt(self, dt):
"""
Context manager to impose a finite-difference time step on all applicable transformations.
For each transformation in this transformation graph that has the attribute
``finite_difference_dt``, that attribute is set to the provided value. The only standard
transformation with this attribute is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
Parameters
----------
dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the finite difference.
If a callable, should accept ``(fromcoord, toframe)`` and return the ``dt`` value.
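        Examples
        --------
        A hedged sketch of typical use::
            import astropy.units as u
            from astropy.coordinates import frame_transform_graph
            with frame_transform_graph.impose_finite_difference_dt(1 * u.year):
                ...  # transformations performed here use the 1-year time step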
"""
key = "finite_difference_dt"
saved_settings = []
try:
for to_frames in self._graph.values():
for transform in to_frames.values():
if hasattr(transform, key):
old_setting = (transform, key, getattr(transform, key))
saved_settings.append(old_setting)
setattr(transform, key, dt)
yield
finally:
for setting in saved_settings:
setattr(*setting)
# <-------------------Define the builtin transform classes-------------------->
class CoordinateTransform(metaclass=ABCMeta):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to start from.
tosys : `~astropy.coordinates.BaseCoordinateFrame` subclass
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError("fromsys must be a class")
if not inspect.isclass(tosys):
raise TypeError("tosys must be a class")
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError("fromsys and tosys must be classes")
self.overlapping_frame_attr_names = overlap = []
if hasattr(fromsys, "frame_attributes") and hasattr(tosys, "frame_attributes"):
# the if statement is there so that non-frame things might be usable
# if it makes sense
for from_nm in fromsys.frame_attributes:
if from_nm in tosys.frame_attributes:
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : `~astropy.coordinates.TransformGraph` object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary ``tosys.frame_attributes``.
Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
        ``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not callable(func):
raise TypeError("func must be callable")
with suppress(TypeError):
sig = signature(func)
            kinds = [x.kind for x in sig.parameters.values()]
            # Count positional-capable parameters explicitly; the kind
            # constants live on ``inspect.Parameter``, not on the signature.
            positional_kinds = (
                inspect.Parameter.POSITIONAL_ONLY,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
            )
            if (
                sum(kind in positional_kinds for kind in kinds) < 2
                and inspect.Parameter.VAR_POSITIONAL not in kinds
            ):
raise ValueError("provided function does not accept two arguments")
self.func = func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError(
f"the transformation function yielded {res} but "
f"should have been of type {self.tosys}"
)
if fromcoord.data.differentials and not res.data.differentials:
warn(
"Applied a FunctionTransform to a coordinate frame with "
"differentials, but the FunctionTransform does not handle "
"differentials, so they have been dropped.",
AstropyWarning,
)
return res
class FunctionTransformWithFiniteDifference(FunctionTransform):
r"""Transormation based on functions using finite difference for velocities.
A coordinate transformation that works like a
`~astropy.coordinates.FunctionTransform`, but computes velocity shifts
based on the finite-difference relative to one of the frame attributes.
Note that the transform function should *not* change the differential at
all in this case, as any differentials will be overridden.
When a differential is in the from coordinate, the finite difference
    calculation has two components. The first part is simply the existing
    differential, re-oriented (using finite-difference techniques) to
point in the direction the velocity vector has in the *new* frame. The
second component is the "induced" velocity. That is, the velocity
intrinsic to the frame itself, estimated by shifting the frame using the
``finite_difference_frameattr_name`` frame attribute a small amount
(``finite_difference_dt``) in time and re-calculating the position.
Parameters
----------
finite_difference_frameattr_name : str or None
The name of the frame attribute on the frames to use for the finite
difference. Both the to and the from frame will be checked for this
attribute, but only one needs to have it. If None, no velocity
component induced from the frame itself will be included - only the
re-orientation of any existing differential.
finite_difference_dt : `~astropy.units.Quantity` ['time'] or callable
If a quantity, this is the size of the differential used to do the
finite difference. If a callable, should accept
``(fromcoord, toframe)`` and return the ``dt`` value.
symmetric_finite_difference : bool
If True, the finite difference is computed as
        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The latter
case has slightly better performance (and more stable finite difference
behavior).
All other parameters are identical to the initializer for
`~astropy.coordinates.FunctionTransform`.
"""
def __init__(
self,
func,
fromsys,
tosys,
priority=1,
register_graph=None,
finite_difference_frameattr_name="obstime",
finite_difference_dt=1 * u.second,
symmetric_finite_difference=True,
):
super().__init__(func, fromsys, tosys, priority, register_graph)
self.finite_difference_frameattr_name = finite_difference_frameattr_name
self.finite_difference_dt = finite_difference_dt
self.symmetric_finite_difference = symmetric_finite_difference
@property
def finite_difference_frameattr_name(self):
return self._finite_difference_frameattr_name
@finite_difference_frameattr_name.setter
def finite_difference_frameattr_name(self, value):
if value is None:
self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
else:
diff_attr_in_fromsys = value in self.fromsys.frame_attributes
diff_attr_in_tosys = value in self.tosys.frame_attributes
if diff_attr_in_fromsys or diff_attr_in_tosys:
self._diff_attr_in_fromsys = diff_attr_in_fromsys
self._diff_attr_in_tosys = diff_attr_in_tosys
else:
raise ValueError(
f"Frame attribute name {value} is not a frame attribute of"
f" {self.fromsys} or {self.tosys}"
)
self._finite_difference_frameattr_name = value
def __call__(self, fromcoord, toframe):
from .representation import CartesianDifferential, CartesianRepresentation
supcall = self.func
if fromcoord.data.differentials:
# this is the finite difference case
if callable(self.finite_difference_dt):
dt = self.finite_difference_dt(fromcoord, toframe)
else:
dt = self.finite_difference_dt
halfdt = dt / 2
from_diffless = fromcoord.realize_frame(
fromcoord.data.without_differentials()
)
reprwithoutdiff = supcall(from_diffless, toframe)
# first we use the existing differential to compute an offset due to
# the already-existing velocity, but in the new frame
fromcoord_cart = fromcoord.cartesian
if self.symmetric_finite_difference:
fwdxyz = (
fromcoord_cart.xyz
+ fromcoord_cart.differentials["s"].d_xyz * halfdt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
backxyz = (
fromcoord_cart.xyz
- fromcoord_cart.differentials["s"].d_xyz * halfdt
)
back = supcall(
fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe
)
else:
fwdxyz = (
fromcoord_cart.xyz + fromcoord_cart.differentials["s"].d_xyz * dt
)
fwd = supcall(
fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe
)
back = reprwithoutdiff
diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
# now we compute the "induced" velocities due to any movement in
# the frame itself over time
attrname = self.finite_difference_frameattr_name
if attrname is not None:
if self.symmetric_finite_difference:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + halfdt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + halfdt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) - halfdt}
from_diffless_back = from_diffless.replicate(**kws)
else:
from_diffless_back = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) - halfdt}
back_frame = toframe.replicate_without_data(**kws)
else:
back_frame = toframe
back = supcall(from_diffless_back, back_frame)
else:
if self._diff_attr_in_fromsys:
kws = {attrname: getattr(from_diffless, attrname) + dt}
from_diffless_fwd = from_diffless.replicate(**kws)
else:
from_diffless_fwd = from_diffless
if self._diff_attr_in_tosys:
kws = {attrname: getattr(toframe, attrname) + dt}
fwd_frame = toframe.replicate_without_data(**kws)
else:
fwd_frame = toframe
fwd = supcall(from_diffless_fwd, fwd_frame)
back = reprwithoutdiff
diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
newdiff = CartesianDifferential(diffxyz)
reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(
newdiff
)
return reprwithoutdiff.realize_frame(reprwithdiff)
else:
return supcall(fromcoord, toframe)
class BaseAffineTransform(CoordinateTransform):
"""Base class for common functionality between the ``AffineTransform``-type
subclasses.
This base class is needed because `~astropy.coordinates.AffineTransform`
and the matrix transform classes share the ``__call__()`` method, but
differ in how they generate the affine parameters.
`~astropy.coordinates.StaticMatrixTransform` passes in a matrix stored as a
class attribute, and both of the matrix transforms pass in ``None`` for the
offset. Hence, user subclasses would likely want to subclass this (rather
than `~astropy.coordinates.AffineTransform`) if they want to provide
alternative transformations using this machinery.
"""
def _apply_transform(self, fromcoord, matrix, offset):
from .representation import (
CartesianDifferential,
RadialDifferential,
SphericalCosLatDifferential,
SphericalDifferential,
UnitSphericalRepresentation,
)
data = fromcoord.data
has_velocity = "s" in data.differentials
# Bail out if no transform is actually requested
if matrix is None and offset is None:
return data
# list of unit differentials
_unit_diffs = (
SphericalDifferential._unit_differential,
SphericalCosLatDifferential._unit_differential,
)
unit_vel_diff = has_velocity and isinstance(
data.differentials["s"], _unit_diffs
)
rad_vel_diff = has_velocity and isinstance(
data.differentials["s"], RadialDifferential
)
# Some initial checking to short-circuit doing any re-representation if
# we're going to fail anyways:
if isinstance(data, UnitSphericalRepresentation) and offset is not None:
raise TypeError(
"Position information stored on coordinate frame "
"is insufficient to do a full-space position "
"transformation (representation class: {data.__class__})"
)
elif (
has_velocity
and (unit_vel_diff or rad_vel_diff)
and offset is not None
and "s" in offset.differentials
):
# Coordinate has a velocity, but it is not a full-space velocity
# that we need to do a velocity offset
raise TypeError(
"Velocity information stored on coordinate frame is insufficient to do"
" a full-space velocity transformation (differential class:"
f" {data.differentials['s'].__class__})"
)
elif len(data.differentials) > 1:
# We should never get here because the frame initializer shouldn't
# allow more differentials, but this just adds protection for
# subclasses that somehow skip the checks
raise ValueError(
"Representation passed to AffineTransform contains multiple associated"
" differentials. Only a single differential with velocity units is"
f" presently supported (differentials: {data.differentials})."
)
# If the representation is a UnitSphericalRepresentation, and this is
# just a MatrixTransform, we have to try to turn the differential into a
# Unit version of the differential (if no radial velocity) or a
# sphericaldifferential with zero proper motion (if only a radial
# velocity) so that the matrix operation works
if (
has_velocity
and isinstance(data, UnitSphericalRepresentation)
and not unit_vel_diff
and not rad_vel_diff
):
# retrieve just velocity differential
unit_diff = data.differentials["s"].represent_as(
data.differentials["s"]._unit_differential, data
)
data = data.with_differentials({"s": unit_diff}) # updates key
# If it's a RadialDifferential, we flat-out ignore the differentials
# This is because, by this point (past the validation above), we can
# only possibly be doing a rotation-only transformation, and that
# won't change the radial differential. We later add it back in
elif rad_vel_diff:
data = data.without_differentials()
# Convert the representation and differentials to cartesian without
# having them attached to a frame
rep = data.to_cartesian()
diffs = {
k: diff.represent_as(CartesianDifferential, data)
for k, diff in data.differentials.items()
}
rep = rep.with_differentials(diffs)
# Only do transform if matrix is specified. This is for speed in
# transformations that only specify an offset (e.g., LSR)
if matrix is not None:
# Note: this applies to both representation and differentials
rep = rep.transform(matrix)
# TODO: if we decide to allow arithmetic between representations that
# contain differentials, this can be tidied up
if offset is not None:
newrep = rep.without_differentials() + offset.without_differentials()
else:
newrep = rep.without_differentials()
# We need a velocity (time derivative) and, for now, are strict: the
# representation can only contain a velocity differential and no others.
if has_velocity and not rad_vel_diff:
veldiff = rep.differentials["s"] # already in Cartesian form
if offset is not None and "s" in offset.differentials:
veldiff = veldiff + offset.differentials["s"]
newrep = newrep.with_differentials({"s": veldiff})
if isinstance(fromcoord.data, UnitSphericalRepresentation):
# Special-case this because otherwise the return object will think
# it has a valid distance with the default return (a
# CartesianRepresentation instance)
if has_velocity and not unit_vel_diff and not rad_vel_diff:
# We have to first represent as the Unit types we converted to,
# then put the d_distance information back in to the
# differentials and re-represent as their original forms
newdiff = newrep.differentials["s"]
_unit_cls = fromcoord.data.differentials["s"]._unit_differential
newdiff = newdiff.represent_as(_unit_cls, newrep)
kwargs = {comp: getattr(newdiff, comp) for comp in newdiff.components}
kwargs["d_distance"] = fromcoord.data.differentials["s"].d_distance
diffs = {
"s": fromcoord.data.differentials["s"].__class__(
copy=False, **kwargs
)
}
elif has_velocity and unit_vel_diff:
newdiff = newrep.differentials["s"].represent_as(
fromcoord.data.differentials["s"].__class__, newrep
)
diffs = {"s": newdiff}
else:
diffs = newrep.differentials
newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs
newrep = newrep.with_differentials(diffs)
elif has_velocity and unit_vel_diff:
# Here, we're in the case where the representation is not
# UnitSpherical, but the differential *is* one of the UnitSpherical
# types. We have to convert back to that differential class or the
# resulting frame will think it has a valid radial_velocity. This
# can probably be cleaned up: we currently have to go through the
# dimensional version of the differential before representing as the
# unit differential so that the units work out (the distance length
# unit shouldn't appear in the resulting proper motions)
diff_cls = fromcoord.data.differentials["s"].__class__
newrep = newrep.represent_as(
fromcoord.data.__class__, diff_cls._dimensional_differential
)
newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls)
# We pulled the radial differential off of the representation
# earlier, so now we need to put it back. But, in order to do that, we
# have to turn the representation into a repr that is compatible with
# having a RadialDifferential
if has_velocity and rad_vel_diff:
newrep = newrep.represent_as(fromcoord.data.__class__)
newrep = newrep.with_differentials({"s": fromcoord.data.differentials["s"]})
return newrep
def __call__(self, fromcoord, toframe):
params = self._affine_params(fromcoord, toframe)
newrep = self._apply_transform(fromcoord, *params)
return toframe.realize_frame(newrep)
@abstractmethod
def _affine_params(self, fromcoord, toframe):
pass
class AffineTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a 3 x 3
cartesian transformation matrix and a tuple of displacement vectors.
See `~astropy.coordinates.Galactocentric` for
an example.
Parameters
----------
transform_func : callable
A callable that has the signature ``transform_func(fromcoord, toframe)``
and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
Cartesian representation, and a ``CartesianRepresentation`` with
(optionally) an attached velocity ``CartesianDifferential`` to represent
a translation and offset in velocity to apply after the matrix
operation.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``transform_func`` is not callable
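    Examples
    --------
    A sketch of the expected ``transform_func`` signature; the function name,
    the identity matrix, and the 1 kpc offset below are placeholders, not a
    real transformation::
        import numpy as np
        import astropy.units as u
        from astropy.coordinates import CartesianRepresentation
        def some_frame_to_other(fromcoord, toframe):
            matrix = np.eye(3)
            offset = CartesianRepresentation([1, 0, 0] * u.kpc)
            return matrix, offset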
"""
def __init__(self, transform_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(transform_func):
raise TypeError("transform_func is not callable")
self.transform_func = transform_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.transform_func(fromcoord, toframe)
class StaticMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
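    Examples
    --------
    An illustrative sketch with two bare-bones frames (hypothetical classes,
    shown only to demonstrate the call signature)::
        import astropy.units as u
        from astropy.coordinates import BaseCoordinateFrame
        from astropy.coordinates.matrix_utilities import rotation_matrix
        class FrameA(BaseCoordinateFrame):
            ...
        class FrameB(BaseCoordinateFrame):
            ...
        t = StaticMatrixTransform(rotation_matrix(30 * u.deg, "z"), FrameA, FrameB)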
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError("Provided matrix is not 3 x 3")
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix, None
class DynamicMatrixTransform(BaseAffineTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1, register_graph=None):
if not callable(matrix_func):
raise TypeError("matrix_func is not callable")
self.matrix_func = matrix_func
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
def _affine_params(self, fromcoord, toframe):
return self.matrix_func(fromcoord, toframe), None
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
    transforms : sequence of `~astropy.coordinates.CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : float or int
        The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `~astropy.coordinates.TransformGraph` or None
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `~astropy.coordinates.StaticMatrixTransform`
will be collapsed into a single transformation to speed up the
calculation.
"""
def __init__(
self,
transforms,
fromsys,
tosys,
priority=1,
register_graph=None,
collapse_static_mats=True,
):
super().__init__(
fromsys, tosys, priority=priority, register_graph=register_graph
)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines together sequences of StaticMatrixTransform's into a single
transform and returns it.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if isinstance(lasttrans, StaticMatrixTransform) and isinstance(
currtrans, StaticMatrixTransform
):
newtrans[-1] = StaticMatrixTransform(
currtrans.matrix @ lasttrans.matrix,
lasttrans.fromsys,
currtrans.tosys,
)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
# build an intermediate frame with attributes taken from either
# `toframe`, or if not there, `fromcoord`, or if not there, use
# the defaults
# TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.frame_attributes:
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
def _as_single_transform(self):
"""
Return an encapsulated version of the composite transform so that it appears to
be a single transform.
The returned transform internally calls the constituent transforms. If all of
the transforms are affine, the merged transform is
`~astropy.coordinates.DynamicMatrixTransform` (if there are no
origin shifts) or `~astropy.coordinates.AffineTransform`
(otherwise). If at least one of the transforms is not affine, the merged
transform is
`~astropy.coordinates.FunctionTransformWithFiniteDifference`.
"""
# Create a list of the transforms including flattening any constituent CompositeTransform
transforms = [
t if not isinstance(t, CompositeTransform) else t._as_single_transform()
for t in self.transforms
]
if all([isinstance(t, BaseAffineTransform) for t in transforms]):
# Check if there may be an origin shift
fixed_origin = all(
isinstance(t, (StaticMatrixTransform, DynamicMatrixTransform))
for t in transforms
)
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return None if fixed_origin else (None, None)
# Create a merged attribute dictionary for any intermediate frames
# For any attributes shared by the "from"/"to" frames, the "to" frame takes
# precedence because this is the same choice implemented in __call__()
merged_attr = {
name: getattr(from_coo, name) for name in from_coo.frame_attributes
}
merged_attr.update(
{
name: getattr(to_frame, name)
for name in to_frame.frame_attributes
}
)
affine_params = (None, None)
# Step through each transform step (frame A -> frame B)
for i, t in enumerate(transforms):
# Extract the relevant attributes for frame A
if i == 0:
# If frame A is actually the initial frame, preserve its attributes
a_attr = {
name: getattr(from_coo, name)
for name in from_coo.frame_attributes
}
else:
a_attr = {
k: v
for k, v in merged_attr.items()
if k in t.fromsys.frame_attributes
}
# Extract the relevant attributes for frame B
b_attr = {
k: v
for k, v in merged_attr.items()
if k in t.tosys.frame_attributes
}
# Obtain the affine parameters for the transform
# Note that we insert some dummy data into frame A because the transformation
# machinery requires there to be data present. Removing that limitation
# is a possible TODO, but some care would need to be taken because some affine
# transforms have branching code depending on the presence of differentials.
next_affine_params = t._affine_params(
t.fromsys(from_coo.data, **a_attr), t.tosys(**b_attr)
)
# Combine the affine parameters with the running set
affine_params = _combine_affine_params(
affine_params, next_affine_params
)
# If there is no origin shift, return only the matrix
return affine_params[0] if fixed_origin else affine_params
# The return type depends on whether there is any origin shift
transform_type = DynamicMatrixTransform if fixed_origin else AffineTransform
else:
# Dynamically define the transformation function
def single_transform(from_coo, to_frame):
if from_coo.is_equivalent_frame(to_frame): # loopback to the same frame
return to_frame.realize_frame(from_coo.data)
return self(from_coo, to_frame)
transform_type = FunctionTransformWithFiniteDifference
return transform_type(
single_transform, self.fromsys, self.tosys, priority=self.priority
)
def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
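    Applying ``x -> M @ x + vec`` followed by ``x -> next_M @ x + next_vec``
    gives (a sketch of the algebra implemented below)::
        next_M @ (M @ x + vec) + next_vec
            == (next_M @ M) @ x + (next_M @ vec + next_vec)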
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec
# map transform classes to colorblind-safe colors
trans_to_color = {}
trans_to_color[AffineTransform] = "#555555" # gray
trans_to_color[FunctionTransform] = "#783001" # dark red-ish/brown
trans_to_color[FunctionTransformWithFiniteDifference] = "#d95f02" # red-ish
trans_to_color[StaticMatrixTransform] = "#7570b3" # blue-ish
trans_to_color[DynamicMatrixTransform] = "#1b9e77" # green-ish