# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.0 * u.m)
assert ten_meter == u.CompositeUnit(10.0, [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.0 * ten_meter) == u.CompositeUnit(100.0, [u.m], [1])
foo = u.Unit("foo", (10.0 * ten_meter) ** 2, namespace=locals())
assert foo == u.CompositeUnit(10000.0, [u.m], [2])
assert u.Unit("m") == u.m
assert u.Unit("") == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit("10 m") == ten_meter
assert u.Unit(10.0) == u.CompositeUnit(10.0, [], [])
assert u.Unit() == u.dimensionless_unscaled
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1.0 / 3.0)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
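    # _get_converter(u.s) returns a callable that rescales plain values;
    # 1 hour is 3600 seconds, so applying the converter to 1 gives 3600.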
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
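    # 1 cm/s sustained for 1 h covers 3600 cm = 36 m, hence the factor of 36 below.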
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm**2
assert u.cm * u.cm * u.cm == u.cm**3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.0
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
bla = u.def_unit("bla", namespace=locals())
assert bla.represents is bla
blabla = u.def_unit("blabla", 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.0
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.0e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
assert_allclose(u.spat.to(u.sr), 12.56637061435917)
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = (1.0 * u.kpc) / (1.0 * u.Mpc)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = (1.0 * u.m) / (1.0 * u.km)
assert testunit.unit.physical_type == "dimensionless"
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with pytest.warns(u.UnitsWarning, match="FOO"):
u.Unit("FOO", parse_strict="warn")
def test_multiple_solidus():
with pytest.warns(
u.UnitsWarning,
match="'m/s/kg' contains multiple slashes, which is discouraged",
):
assert u.Unit("m/s/kg").to_string() == "m / (kg s)"
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
# Regression test for #9000: solidi in exponents do not count towards this.
x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
assert x.to_string() == "m(5/2) kg(3/10) / s"
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict="silent")
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict="silent")
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict="silent")
assert unit != unit3
assert not unit.is_equivalent(unit3)
# Also test basic (in)equalities.
assert unit == "FOO"
assert unit != u.m
# next two from gh-7603.
assert unit != None
assert unit not in (None, u.m)
with pytest.raises(ValueError):
unit._get_converter(unit3)
_ = unit.to_string("latex")
_ = unit2.to_string("cgs")
with pytest.raises(ValueError):
u.Unit("BAR", parse_strict="strict")
with pytest.raises(TypeError):
u.Unit(None)
def test_invalid_scale():
with pytest.raises(TypeError):
["a", "b", "c"] * u.m
def test_cds_power():
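    # In the CDS string format, a leading "10+22" denotes a scale factor of 1e22.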
unit = u.Unit("10+22/cm2", format="cds", parse_strict="silent")
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m**3, namespace=locals())
assert "foo" in locals()
with u.add_enabled_units(foo):
assert "foo" in u.get_current_unit_registry().registry
assert "foo" not in u.get_current_unit_registry().registry
def test_in_units():
speed_unit = u.cm / u.s
_ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent("foo") is False
assert u.m.is_equivalent("pc") is True
def test_convertible_exception():
with pytest.raises(u.UnitsError, match=r"length.+ are not convertible"):
u.AA.to(u.h * u.s**2)
def test_convertible_exception2():
with pytest.raises(u.UnitsError, match=r"length. and .+time.+ are not convertible"):
u.m.to(u.s)
def test_invalid_type():
class A:
pass
with pytest.raises(TypeError):
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from astropy.constants import e
from astropy.units import cgs
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == {u.lm, u.Wb}
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
u.m.compose(units=[])
def _unit_as_str(unit):
    # This function serves two purposes - it is used to sort the units to
    # test alphabetically, and it is also used to allow pytest to show the unit
    # in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.cgs.deg_C
):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
assert [x.is_equivalent(unit) for x in si]
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (
isinstance(val, u.UnitBase)
and not isinstance(val, u.PrefixUnit)
and val != u.si.deg_C
):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize(
"unit", sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
assert [x.is_equivalent(unit) for x in cgs]
assert cgs[0] == unit.cgs
def test_to_si():
"""Check units that are not official derived units.
Should not appear on its own or as part of a composite unit.
"""
# TODO: extend to all units not listed in Tables 1--6 of
# https://physics.nist.gov/cuu/Units/units.html
# See gh-10585.
# This was always the case
assert u.bar.si is not u.bar
# But this used to fail.
assert u.bar not in (u.kg / (u.s**2 * u.sr * u.nm)).si._bases
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from astropy.units import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s**2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
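    # 1 pc is about 3.086e13 km and 1 Myr about 3.156e13 s, so 1 km/s is
    # roughly 1.0227 pc/Myr, which is the scale checked below.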
x = (u.km / u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
with pytest.raises(u.UnitsError):
(u.km / u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
unit = u.kg
with pytest.raises(u.UnitsError):
unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m**0.5 / u.yr**1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s**-1).compose()
assert results[0].bases[0] in (u.Hz, u.Bq)
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s**3 * u.au**2.5 / u.yr**0.5 / u.sr**2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10**21 * u.M_p / u.cm**2
sigma.to(u.M_sun / u.pc**2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ["<", ">"]:
for ntype in ["i", "f"]:
for byte in ["4", "8"]:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, "as")
assert hasattr(u, "attosecond")
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ["ct", "count"]
assert u.ct.short_names == ["ct", "count"]
assert u.ct.long_names == ["count"]
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(["foo"], format={"baz": "bar"})
# This is local, so the unit should not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
# It should still not be registered.
assert "foo" not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert "foo" in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is new_unit
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert "foo" not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy is not new_unit
assert new_unit_copy.names == ["foo"]
assert new_unit_copy.get_format_name("baz") == "bar"
assert "foo" in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert "foo" not in u.get_current_unit_registry().registry
def test_pickle_between_sessions():
"""We cannot really test between sessions easily, so fake it.
This test can be changed if the pickle protocol or the code
changes enough that it no longer works.
"""
hash_m = hash(u.m)
unit = pickle.loads(
b"\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12"
b"astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit"
b"\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94"
b"(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06"
b"_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names"
b"\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07"
b"_format\x94}\x94\x8c\x07__doc__\x94\x8c "
b"meter: base unit of length in SI\x94ub."
)
assert unit is u.m
assert hash(u.m) == hash_m
@pytest.mark.parametrize(
"unit",
[u.IrreducibleUnit(["foo"], format={"baz": "bar"}), u.Unit("m_per_s", u.m / u.s)],
)
def test_pickle_does_not_keep_memoized_hash(unit):
"""
Tests private attribute since the problem with _hash being pickled
and restored only appeared if the unpickling was done in another
session, for which the hash no longer was valid, and it is difficult
to mimic separate sessions in a simple test. See gh-11872.
"""
unit_hash = hash(unit)
assert unit._hash is not None
unit_copy = pickle.loads(pickle.dumps(unit))
# unit is not registered so we get a copy.
assert unit_copy is not unit
assert unit_copy._hash is None
assert hash(unit_copy) == unit_hash
with u.add_enabled_units([unit]):
# unit is registered, so we get a reference.
unit_ref = pickle.loads(pickle.dumps(unit))
if isinstance(unit, u.IrreducibleUnit):
assert unit_ref is unit
else:
assert unit_ref is not unit
# pickle.load used to override the hash, although in this case
# it would be the same anyway, so not clear this tests much.
assert hash(unit) == unit_hash
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit("asdf", parse_strict="silent")
pickle.loads(pickle.dumps(a))
def test_duplicate_define():
with pytest.raises(ValueError):
u.def_unit("m", namespace=u.__dict__)
def test_all_units():
from astropy.units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string("latex")
def test_operations_with_strings():
assert u.m / "5s" == (u.m / (5.0 * u.s))
assert u.m * "5s" == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg # noqa: B015
def test_compose_into_arbitrary_units():
# Issue #1438
from astropy.constants import G
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = "kg"
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit("nrad/s")
unit2 = u.Unit("Hz(1/2)")
assert str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) == "nrad / (Hz(1/2) s)"
def test_unicode_policy():
from astropy.tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
("microns", "micron"),
("s/microns", "micron"),
("M", "m"),
("metre", "meter"),
("angstroms", "Angstrom or angstrom"),
("milimeter", "millimeter"),
("ångström", "Angstrom, angstrom, mAngstrom or mangstrom"),
("kev", "EV, eV, kV or keV"),
]:
with pytest.raises(ValueError, match=f"Did you mean {matches}"):
u.Unit(search)
def test_fits_hst_unit():
"""See #1911."""
with pytest.warns(u.UnitsWarning, match="multiple slashes") as w:
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s**-1 * u.cm**-2 * u.angstrom**-1
assert len(w) == 1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1.0 / (70.0 * u.km / u.s / u.Mpc)
vc = 200 * u.km / u.s
x = (c.G**2 * m**2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** Fraction(1, 3) / vc
v2 = x.to("pc")
x = (c.G**2 * m**2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to("pc")
x = (c.G**2 * m**2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to("pc")
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
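    # Float powers close to a fraction with a small denominator are stored as
    # Fraction; 1/101 is beyond that limit and stays a plain float.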
x = u.m ** (1.0 / 101.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
# Regression test for #9258.
x = (u.TeV ** (-2.2)) ** (1 / -2.2)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(1, 1)
def test_sqrt_mag():
sqrt_mag = u.mag**0.5
assert hasattr(sqrt_mag.decompose().scale, "imag")
assert (sqrt_mag.decompose()) ** 2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None)
assert u.m != None
def test_validate_power_detect_fraction():
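    # 1.1666666666666665 differs from 7/6 only by floating-point rounding, so
    # validate_power should still detect the simple fraction.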
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6 * u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h) ** 0.5)
result = (t1 * t2) ** -0.8
assert result.unit.physical_type == "length"
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m**1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
def test_enable_unit_groupings():
from astropy.units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from astropy.units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test for a few units that the unit summary table correctly reports
whether or not that unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from astropy.units import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
if unit.name == "lyr":
assert prefixes
elif unit.name == "pc":
assert prefixes
elif unit.name == "barn":
assert prefixes
elif unit.name == "cycle":
assert prefixes == "No"
elif unit.name == "spat":
assert prefixes == "No"
elif unit.name == "vox":
assert prefixes == "Yes"
def test_raise_to_negative_power():
"""Test that order of bases is changed when raising to negative power.
Regression test for https://github.com/astropy/astropy/issues/8260
"""
m2s2 = u.m**2 / u.s**2
spm = m2s2 ** (-1 / 2)
assert spm.bases == [u.s, u.m]
assert spm.powers == [1, -1]
assert spm == u.s / u.m
@pytest.mark.parametrize(
"name, symbol, multiplying_factor",
[
("quetta", "Q", 1e30),
("ronna", "R", 1e27),
("yotta", "Y", 1e24),
("zetta", "Z", 1e21),
("exa", "E", 1e18),
("peta", "P", 1e15),
("tera", "T", 1e12),
("giga", "G", 1e9),
("mega", "M", 1e6),
("kilo", "k", 1e3),
("deca", "da", 1e1),
("deci", "d", 1e-1),
("centi", "c", 1e-2),
("milli", "m", 1e-3),
("micro", "u", 1e-6),
("nano", "n", 1e-9),
("pico", "p", 1e-12),
("femto", "f", 1e-15),
("atto", "a", 1e-18),
("zepto", "z", 1e-21),
("yocto", "y", 1e-24),
("ronto", "r", 1e-27),
("quecto", "q", 1e-30),
],
)
def test_si_prefixes(name, symbol, multiplying_factor):
base = 1 * u.g
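    # Converting 1 g to a prefixed gram keeps the physical quantity the same,
    # so the ratio of the numerical values recovers the prefix factor.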
quantity_from_symbol = base.to(f"{symbol}g")
quantity_from_name = base.to(f"{name}gram")
assert u.isclose(quantity_from_name, base)
assert u.isclose(quantity_from_symbol, base)
value_ratio = base.value / quantity_from_symbol.value
assert u.isclose(value_ratio, multiplying_factor)
# The purpose of these tests is to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import sys
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.utils.compat import NUMPY_LT_1_21_1, NUMPY_LT_1_22
class TestQuantityArrayCopy:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
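        # With copy=False the Quantity shares memory with v, so mutating v shows
        # up in q_nocopy but not in the independently copied q_copy.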
v = np.arange(1000.0)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.0
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1.0, 100.0), "km/s")
q2 = q.to(u.m / u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km / u.s)
assert np.all(q.value == q3.value)
q[0] = -1.0 * u.km / u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.0), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.0 * u.m / u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.0), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.0 * u.m / u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9 * u.m / u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.0).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8.0 * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.0) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = list(q.flat)
assert np.all(u.Quantity(q_flat_list) == u.Quantity(list(q.value.flat), q.unit))
# check that flat works like a view of the real array
q_flat[8] = -1.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
class TestQuantityReshapeFuncs:
"""Test different ndarray methods that alter the array shape
tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
"""
def test_reshape(self):
q = np.arange(6.0) * u.m
q_reshape = q.reshape(3, 2)
assert isinstance(q_reshape, u.Quantity)
assert q_reshape.unit == q.unit
assert np.all(q_reshape.value == q.value.reshape(3, 2))
def test_squeeze(self):
q = np.arange(6.0).reshape(6, 1) * u.m
q_squeeze = q.squeeze()
assert isinstance(q_squeeze, u.Quantity)
assert q_squeeze.unit == q.unit
assert np.all(q_squeeze.value == q.value.squeeze())
def test_ravel(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_ravel = q.ravel()
assert isinstance(q_ravel, u.Quantity)
assert q_ravel.unit == q.unit
assert np.all(q_ravel.value == q.value.ravel())
def test_flatten(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_flatten = q.flatten()
assert isinstance(q_flatten, u.Quantity)
assert q_flatten.unit == q.unit
assert np.all(q_flatten.value == q.value.flatten())
def test_transpose(self):
q = np.arange(6.0).reshape(3, 2) * u.m
q_transpose = q.transpose()
assert isinstance(q_transpose, u.Quantity)
assert q_transpose.unit == q.unit
assert np.all(q_transpose.value == q.value.transpose())
def test_swapaxes(self):
q = np.arange(6.0).reshape(3, 1, 2) * u.m
q_swapaxes = q.swapaxes(0, 2)
assert isinstance(q_swapaxes, u.Quantity)
assert q_swapaxes.unit == q.unit
assert np.all(q_swapaxes.value == q.value.swapaxes(0, 2))
@pytest.mark.xfail(
sys.byteorder == "big" and NUMPY_LT_1_21_1, reason="Numpy GitHub Issue 19153"
)
def test_flat_attributes(self):
"""While ``flat`` doesn't make a copy, it changes the shape."""
q = np.arange(6.0).reshape(3, 1, 2) * u.m
qf = q.flat
# flat shape is same as before reshaping
assert len(qf) == 6
# see TestQuantityArrayCopy.test_flat for tests of iteration
# and slicing and setting. Here we test the properties and methods to
# match `numpy.ndarray.flatiter`
assert qf.base is q
# testing the indices -- flat and full -- into the array
assert qf.coords == (0, 0, 0) # to start
assert qf.index == 0
# now consume the iterator
endindices = [(qf.index, qf.coords) for x in qf][-2] # next() oversteps
assert endindices[0] == 5
assert endindices[1] == (2, 0, 1) # shape of q - 1
# also check q_flat copies properly
q_flat_copy = qf.copy()
assert all(q_flat_copy == q.flatten())
assert isinstance(q_flat_copy, u.Quantity)
assert not np.may_share_memory(q_flat_copy, q)
class TestQuantityStatsFuncs:
"""
Test statistical functions
"""
def test_mean(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert_array_equal(np.mean(q1), 3.6 * u.m)
assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m)
def test_mean_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
qi2 = np.mean(q1, out=qi)
assert qi2 is qi
assert qi == 3.6 * u.m
def test_mean_where(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m)
def test_std(self):
q1 = np.array([1.0, 2.0]) * u.m
assert_array_equal(np.std(q1), 0.5 * u.m)
assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m)
def test_std_inplace(self):
q1 = np.array([1.0, 2.0]) * u.m
qi = 1.5 * u.s
np.std(q1, out=qi)
assert qi == 0.5 * u.m
def test_std_where(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m)
def test_var(self):
q1 = np.array([1.0, 2.0]) * u.m
assert_array_equal(np.var(q1), 0.25 * u.m**2)
assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m**2)
def test_var_inplace(self):
q1 = np.array([1.0, 2.0]) * u.m
qi = 1.5 * u.s
np.var(q1, out=qi)
assert qi == 0.25 * u.m**2
def test_var_where(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m**2)
def test_median(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.median(q1) == 4.0 * u.m
def test_median_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.median(q1, out=qi)
assert qi == 4 * u.m
def test_min(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.min(q1) == 1.0 * u.m
def test_min_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.min(q1, out=qi)
assert qi == 1.0 * u.m
def test_min_where(self):
q1 = np.array([0.0, 1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1.0 * u.m
def test_argmin(self):
q1 = np.array([6.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.argmin(q1) == 1
@pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
def test_argmin_keepdims(self):
q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
assert_array_equal(q1.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
def test_max(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.max(q1) == 6.0 * u.m
def test_max_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.max(q1, out=qi)
assert qi == 6.0 * u.m
def test_max_where(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6.0 * u.m
def test_argmax(self):
q1 = np.array([5.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.argmax(q1) == 4
@pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
def test_argmax_keepdims(self):
q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
assert_array_equal(q1.argmax(axis=0, keepdims=True), np.array([[0, 1]]))
def test_clip(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
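        # km/m is a scaled dimensionless unit, so the bare 1500 is read as
        # 1500 dimensionless = 1.5 km/m, and 5.5 Mm/km as 5.5 km/m.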
c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
assert np.all(c1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)
def test_clip_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
assert np.all(q1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)
c1[0] = 10 * u.Mm / u.mm
assert np.all(c1.value == q1.value)
def test_conj(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
assert np.all(q1.conj() == q1)
def test_ptp(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
assert np.ptp(q1) == 5.0 * u.m
def test_ptp_inplace(self):
q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
qi = 1.5 * u.s
np.ptp(q1, out=qi)
assert qi == 5.0 * u.m
def test_round(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
assert np.all(np.round(q1, decimals=2) == np.round(q1.value, decimals=2) * u.kg)
assert np.all(q1.round(decimals=2) == q1.value.round(decimals=2) * u.kg)
def test_round_inplace(self):
q1 = np.array([1.253, 2.253, 3.253]) * u.kg
qi = np.zeros(3) * u.s
a = q1.round(decimals=2, out=qi)
assert a is qi
assert np.all(q1.round(decimals=2) == qi)
def test_sum(self):
q1 = np.array([1.0, 2.0, 6.0]) * u.m
assert np.all(q1.sum() == 9.0 * u.m)
assert np.all(np.sum(q1) == 9.0 * u.m)
q2 = np.array([[4.0, 5.0, 9.0], [1.0, 1.0, 1.0]]) * u.s
assert np.all(q2.sum(0) == np.array([5.0, 6.0, 10.0]) * u.s)
assert np.all(np.sum(q2, 0) == np.array([5.0, 6.0, 10.0]) * u.s)
def test_sum_inplace(self):
q1 = np.array([1.0, 2.0, 6.0]) * u.m
qi = 1.5 * u.s
np.sum(q1, out=qi)
assert qi == 9.0 * u.m
def test_sum_where(self):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
where = q1 < 7 * u.m
assert np.all(q1.sum(where=where) == 9.0 * u.m)
assert np.all(np.sum(q1, where=where) == 9.0 * u.m)
@pytest.mark.parametrize("initial", [0, 0 * u.m, 1 * u.km])
def test_sum_initial(self, initial):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
expected = 16 * u.m + initial
assert q1.sum(initial=initial) == expected
assert np.sum(q1, initial=initial) == expected
def test_sum_dimensionless_initial(self):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.one
assert q1.sum(initial=1000) == 1016 * u.one
@pytest.mark.parametrize("initial", [10, 1 * u.s])
def test_sum_initial_exception(self, initial):
q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
with pytest.raises(u.UnitsError):
q1.sum(initial=initial)
def test_cumsum(self):
q1 = np.array([1, 2, 6]) * u.m
assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)
q2 = np.array([4, 5, 9]) * u.s
assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)
def test_cumsum_inplace(self):
q1 = np.array([1, 2, 6]) * u.m
qi = np.ones(3) * u.s
np.cumsum(q1, out=qi)
assert np.all(qi == np.array([1, 3, 9]) * u.m)
q2 = q1
q1.cumsum(out=q1)
assert np.all(q2 == qi)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum(self):
q1 = np.array([1.0, 2.0, np.nan]) * u.m
assert np.all(q1.nansum() == 3.0 * u.m)
assert np.all(np.nansum(q1) == 3.0 * u.m)
q2 = np.array([[np.nan, 5.0, 9.0], [1.0, np.nan, 1.0]]) * u.s
assert np.all(q2.nansum(0) == np.array([1.0, 5.0, 10.0]) * u.s)
assert np.all(np.nansum(q2, 0) == np.array([1.0, 5.0, 10.0]) * u.s)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum_inplace(self):
q1 = np.array([1.0, 2.0, np.nan]) * u.m
qi = 1.5 * u.s
qout = q1.nansum(out=qi)
assert qout is qi
assert qi == np.nansum(q1.value) * q1.unit
qi2 = 1.5 * u.s
qout2 = np.nansum(q1, out=qi2)
assert qout2 is qi2
assert qi2 == np.nansum(q1.value) * q1.unit
@pytest.mark.xfail(
NUMPY_LT_1_22, reason="'where' keyword argument not supported for numpy < 1.22"
)
@pytest.mark.filterwarnings("ignore:The nansum method is deprecated")
def test_nansum_where(self):
q1 = np.array([1.0, 2.0, np.nan, 4.0]) * u.m
initial = 0 * u.m
where = q1 < 4 * u.m
assert np.all(q1.nansum(initial=initial, where=where) == 3.0 * u.m)
assert np.all(np.nansum(q1, initial=initial, where=where) == 3.0 * u.m)
def test_prod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(u.UnitsError) as exc:
q1.prod()
with pytest.raises(u.UnitsError) as exc:
np.prod(q1)
q2 = np.array([3.0, 4.0, 5.0]) * u.Unit(1)
assert q2.prod() == 60.0 * u.Unit(1)
assert np.prod(q2) == 60.0 * u.Unit(1)
def test_cumprod(self):
q1 = np.array([1, 2, 6]) * u.m
with pytest.raises(u.UnitsError) as exc:
q1.cumprod()
with pytest.raises(u.UnitsError) as exc:
np.cumprod(q1)
q2 = np.array([3, 4, 5]) * u.Unit(1)
assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
def test_diff(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
assert np.all(q1.diff() == np.array([1.0, 2.0, 6.0]) * u.m)
assert np.all(np.diff(q1) == np.array([1.0, 2.0, 6.0]) * u.m)
def test_ediff1d(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
assert np.all(q1.ediff1d() == np.array([1.0, 2.0, 6.0]) * u.m)
assert np.all(np.ediff1d(q1) == np.array([1.0, 2.0, 6.0]) * u.m)
def test_dot_meth(self):
q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
q2 = np.array([3.0, 4.0, 5.0, 6.0]) * u.s
q3 = q1.dot(q2)
assert q3.value == np.dot(q1.value, q2.value)
assert q3.unit == u.m * u.s
def test_trace_func(self):
q = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
assert np.trace(q) == 5.0 * u.m
def test_trace_meth(self):
q1 = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
assert q1.trace() == 5.0 * u.m
cont = u.Quantity(4.0, u.s)
q2 = np.array([[3.0, 4.0], [5.0, 6.0]]) * u.m
q2.trace(out=cont)
assert cont == 9.0 * u.m
def test_clip_func(self):
q = np.arange(10) * u.m
assert np.all(
np.clip(q, 3 * u.m, 6 * u.m)
== np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m
)
def test_clip_meth(self):
expected = np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m
q1 = np.arange(10) * u.m
q3 = q1.clip(3 * u.m, 6 * u.m)
assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected)
cont = np.zeros(10) * u.s
q1.clip(3 * u.m, 6 * u.m, out=cont)
assert np.all(cont == expected)
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
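        # m/km is a scaled dimensionless unit, so plain numbers assigned below are
        # interpreted as dimensionless: setting 1 corresponds to 1000 m/km.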
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.0
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.0
q2[0] = 9.0 * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9.0, 9.0, 9.0]))
q2[0, :-1] = 8000.0
assert all(q2.flatten()[:3].value == np.array([8.0, 8.0, 9.0]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
        # just to be sure, repeat with a dimensionful unit
q3 = u.Quantity(np.arange(10.0), "m/s")
q3[5] = 100.0 * u.cm / u.s
assert q3[5].value == 1.0
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.0
assert q3[5] == 0.0
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True, False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, "unit")
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km / u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
def test_byte_type_view_field_changes(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.byteswap()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.byteswap())
q2 = q1.astype(np.float64)
assert all(q2 == q1)
assert q2.dtype == np.float64
q2a = q1.getfield(np.int32, offset=0)
q2b = q1.byteswap().getfield(np.int32, offset=4)
assert q2a.unit == q1.unit
assert all(q2b.byteswap() == q2a)
def test_sort(self):
q1 = np.array([1.0, 5.0, 2.0, 4.0]) * u.km / u.m
i = q1.argsort()
assert not hasattr(i, "unit")
q1.sort()
i = q1.searchsorted([1500, 2500])
assert not hasattr(i, "unit")
assert all(
i == q1.to(u.dimensionless_unscaled).value.searchsorted([1500, 2500])
)
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump("a.a")
with pytest.raises(NotImplementedError):
q1.dumps()
class TestStructuredArray:
"""Structured arrays are not specifically supported, but we should not
prevent their use unnecessarily.
Note that these tests use simple units. Now that structured units are
supported, it may make sense to deprecate this.
"""
def setup_method(self):
self.ra = (
np.array(np.arange(12.0).reshape(4, 3)).view(dtype="f8,f8,f8").squeeze()
)
def test_creation(self):
qra = u.Quantity(self.ra, u.m)
assert np.all(qra[:2].value == self.ra[:2])
def test_equality(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = qra[2]
assert qra[1] == qra[2]
def test_assignment_with_non_structured(self):
qra = u.Quantity(self.ra, u.m)
qra[1] = 0
assert qra[1] == np.zeros(3).view(qra.dtype)
def test_assignment_with_different_names(self):
qra = u.Quantity(self.ra, u.m)
dtype = np.dtype([("x", "f8"), ("y", "f8"), ("z", "f8")])
value = np.array((-1.0, -2.0, -3.0), dtype) << u.km
qra[1] = value
assert qra[1] == value
assert qra[1].value == np.array((-1000.0, -2000.0, -3000.0), qra.dtype)
# Ensure we do not override dtype names of value.
assert value.dtype.names == ("x", "y", "z")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities specifically with the ERFA ufuncs.
"""
import erfa
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.introspection import minversion
ERFA_LE_2_0_0 = not minversion(erfa, "2.0.0.1")
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert u.allclose(val, valok * val.unit, atol=dval * val.unit)
class TestPVUfuncs:
def setup_class(self):
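        # dt_pv is a structured dtype with "p" (position) and "v" (velocity)
        # fields; the structured unit "AU,AU/day" maps one unit onto each field.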
self.pv_unit = u.Unit("AU,AU/day")
self.pv_value = np.array(
[
([1.0, 0.0, 0.0], [0.0, 0.0125, 0.0]),
([0.0, 1.0, 0.0], [-0.0125, 0.0, 0.0]),
],
dtype=erfa_ufunc.dt_pv,
)
self.pv = self.pv_value << self.pv_unit
def test_cpv(self):
pv_copy = erfa_ufunc.cpv(self.pv)
assert_array_equal(pv_copy, self.pv)
assert not np.may_share_memory(pv_copy, self.pv)
def test_p2pv(self):
p2pv = erfa_ufunc.p2pv(self.pv["p"])
assert_array_equal(p2pv["p"], self.pv["p"])
assert_array_equal(
p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
)
@pytest.mark.xfail(
erfa.__version__ <= "2.0.0",
reason="erfa bug; https://github.com/liberfa/pyerfa/issues/70)",
)
def test_p2pv_inplace(self):
# TODO: fix np.zeros_like.
out = np.zeros_like(self.pv_value) << self.pv_unit
p2pv = erfa_ufunc.p2pv(self.pv["p"], out=out)
assert out is p2pv
assert_array_equal(p2pv["p"], self.pv["p"])
assert_array_equal(
p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
)
def test_pv2p(self):
p = erfa_ufunc.pv2p(self.pv)
assert_array_equal(p, self.pv["p"])
out = np.zeros_like(p)
p2 = erfa_ufunc.pv2p(self.pv, out=out)
assert out is p2
assert_array_equal(p2, self.pv["p"])
def test_pv2s(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(self.pv.shape)) # latitude
assert r.unit == u.AU
assert_array_equal(r.value, np.ones(self.pv.shape))
assert td.unit == u.radian / u.day
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.day
assert_array_equal(pd.value, np.zeros(self.pv.shape))
assert rd.unit == u.AU / u.day
assert_array_equal(rd.value, np.zeros(self.pv.shape))
def test_pv2s_non_standard_units(self):
pv = self.pv_value << u.Unit("Pa,Pa/m")
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian / u.m
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa / u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
@pytest.mark.xfail(
reason=(
"erfa ufuncs cannot take different names; it is not yet clear whether "
"this is changeable; see https://github.com/liberfa/pyerfa/issues/77"
)
)
def test_pv2s_non_standard_names_and_units(self):
pv_value = np.array(self.pv_value, dtype=[("pos", "f8"), ("vel", "f8")])
pv = pv_value << u.Unit("Pa,Pa/m")
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian / u.m
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa / u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
def test_s2pv(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
# On purpose change some of the units away from expected by s2pv.
pv = erfa_ufunc.s2pv(
theta.to(u.deg), phi, r.to(u.m), td.to(u.deg / u.day), pd, rd.to(u.m / u.s)
)
assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.mm / u.s, rtol=0)
def test_pvstar(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
assert ra.unit == u.radian
assert_quantity_allclose(ra, [0, 90] * u.deg)
assert dec.unit == u.radian
assert_array_equal(dec.value, np.zeros(self.pv.shape)) # latitude
assert pmr.unit == u.radian / u.year
assert_quantity_allclose(pmr, [0.0125, 0.0125] * u.radian / u.day)
assert pmd.unit == u.radian / u.year
assert_array_equal(pmd.value, np.zeros(self.pv.shape))
assert px.unit == u.arcsec
assert_quantity_allclose(px, 1 * u.radian)
assert rv.unit == u.km / u.s
assert_array_equal(rv.value, np.zeros(self.pv.shape))
def test_starpv(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
pv, stat = erfa_ufunc.starpv(
ra.to(u.deg), dec.to(u.deg), pmr, pmd, px, rv.to(u.m / u.s)
)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
assert pv.unit == self.pv.unit
# Roundtrip is not as good as hoped on 32bit, not clear why.
# But proper motions are ridiculously high...
assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.m / u.s, rtol=0)
def test_pvtob(self):
pv = erfa_ufunc.pvtob(
[90, 0] * u.deg,
0.0 * u.deg,
100 * u.km,
0 * u.deg,
0 * u.deg,
0 * u.deg,
90 * u.deg,
)
assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
assert pv.unit["v"] == u.m / u.s
assert_quantity_allclose(
pv["p"], [[-6478, 0, 0], [0, 6478, 0]] * u.km, atol=2 * u.km
)
assert_quantity_allclose(
pv["v"], [[0, -0.5, 0], [-0.5, 0, 0]] * u.km / u.s, atol=0.1 * u.km / u.s
)
def test_pvdpv(self):
pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
assert pvdpv["pdp"].unit == self.pv.unit["p"] ** 2
assert pvdpv["pdv"].unit == self.pv.unit["p"] * self.pv.unit["v"]
assert_array_equal(
pvdpv["pdp"], np.einsum("...i,...i->...", self.pv["p"], self.pv["p"])
)
assert_array_equal(
pvdpv["pdv"], 2 * np.einsum("...i,...i->...", self.pv["p"], self.pv["v"])
)
z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
assert pvdpv2["pdp"].unit == self.pv.unit["p"]
assert pvdpv2["pdv"].unit == self.pv.unit["v"]
assert_array_equal(pvdpv2["pdp"].value, np.zeros(self.pv.shape))
assert_array_equal(pvdpv2["pdv"].value, np.zeros(self.pv.shape))
def test_pvxpv(self):
pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
assert pvxpv["p"].unit == self.pv.unit["p"] ** 2
assert pvxpv["v"].unit == self.pv.unit["p"] * self.pv.unit["v"]
assert_array_equal(pvxpv["p"].value, np.zeros(self.pv["p"].shape))
assert_array_equal(pvxpv["v"].value, np.zeros(self.pv["v"].shape))
z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
pvxpv2 = erfa_ufunc.pvxpv(self.pv, z_axis)
assert pvxpv2["p"].unit == self.pv.unit["p"]
assert pvxpv2["v"].unit == self.pv.unit["v"]
assert_array_equal(pvxpv2["p"], [[0.0, -1, 0.0], [1.0, 0.0, 0.0]] * u.AU)
assert_array_equal(
pvxpv2["v"], [[0.0125, 0.0, 0.0], [0.0, 0.0125, 0.0]] * u.AU / u.day
)
def test_pvm(self):
pm, vm = erfa_ufunc.pvm(self.pv)
assert pm.unit == self.pv.unit["p"]
assert vm.unit == self.pv.unit["v"]
assert_array_equal(pm, np.linalg.norm(self.pv["p"], axis=-1))
assert_array_equal(vm, np.linalg.norm(self.pv["v"], axis=-1))
def test_pvmpv(self):
pvmpv = erfa_ufunc.pvmpv(self.pv, self.pv)
assert pvmpv.unit == self.pv.unit
assert_array_equal(pvmpv["p"], 0 * self.pv["p"])
assert_array_equal(pvmpv["v"], 0 * self.pv["v"])
def test_pvppv(self):
pvppv = erfa_ufunc.pvppv(self.pv, self.pv)
assert pvppv.unit == self.pv.unit
assert_array_equal(pvppv["p"], 2 * self.pv["p"])
assert_array_equal(pvppv["v"], 2 * self.pv["v"])
def test_pvu(self):
pvu = erfa_ufunc.pvu(86400 * u.s, self.pv)
assert pvu.unit == self.pv.unit
assert_array_equal(pvu["p"], self.pv["p"] + 1 * u.day * self.pv["v"])
assert_array_equal(pvu["v"], self.pv["v"])
def test_pvup(self):
pvup = erfa_ufunc.pvup(86400 * u.s, self.pv)
assert pvup.unit == self.pv.unit["p"]
assert_array_equal(pvup, self.pv["p"] + 1 * u.day * self.pv["v"])
def test_sxpv(self):
# Not a realistic example!!
sxpv = erfa_ufunc.sxpv(10.0, self.pv)
assert sxpv.unit == self.pv.unit
assert_array_equal(sxpv["p"], self.pv["p"] * 10)
assert_array_equal(sxpv["v"], self.pv["v"] * 10)
sxpv2 = erfa_ufunc.sxpv(30.0 * u.s, self.pv)
assert sxpv2.unit == u.StructuredUnit("AU s,AU s/d", names=("p", "v"))
assert_array_equal(sxpv2["p"], self.pv["p"] * 30 * u.s)
assert_array_equal(sxpv2["v"], self.pv["v"] * 30 * u.s)
def test_s2xpv(self):
# Not a realistic example!!
s2xpv = erfa_ufunc.s2xpv(10.0, 1 * u.s, self.pv)
assert s2xpv.unit == u.StructuredUnit("AU,AU s/d", names=("p", "v"))
assert_array_equal(s2xpv["p"], self.pv["p"] * 10)
assert_array_equal(s2xpv["v"], self.pv["v"] * u.s)
@pytest.mark.parametrize(
"r",
[
np.eye(3),
np.array(
[
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.eye(3) / u.s,
],
)
def test_rxpv(self, r):
result = erfa_ufunc.rxpv(r, self.pv)
assert_array_equal(result["p"], np.einsum("...ij,...j->...i", r, self.pv["p"]))
assert_array_equal(result["v"], np.einsum("...ij,...j->...i", r, self.pv["v"]))
@pytest.mark.parametrize(
"r",
[
np.eye(3),
np.array(
[
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.eye(3) / u.s,
],
)
def test_trxpv(self, r):
result = erfa_ufunc.trxpv(r, self.pv)
assert_array_equal(
result["p"], np.einsum("...ij,...j->...i", r.T, self.pv["p"])
)
assert_array_equal(
result["v"], np.einsum("...ij,...j->...i", r.T, self.pv["v"])
)
@pytest.mark.xfail(
erfa.__version__ < "1.7.3.1",
reason="dt_eraLDBODY incorrectly defined",
scope="class",
)
class TestEraStructUfuncs:
def setup_class(self):
ldbody = np.array(
[
(0.00028574, 3e-10, ([-7.81014427, -5.60956681, -1.98079819],
[0.0030723249, -0.00406995477, -0.00181335842])),
(0.00095435, 3e-9, ([0.738098796, 4.63658692, 1.9693136],
[-0.00755816922, 0.00126913722, 0.000727999001])),
(1.0, 6e-6, ([-0.000712174377, -0.00230478303, -0.00105865966],
[6.29235213e-6, -3.30888387e-7, -2.96486623e-7]))
],
dtype=erfa_ufunc.dt_eraLDBODY
) # fmt: skip
ldbody_unit = u.StructuredUnit("Msun,radian,(AU,AU/day)", ldbody.dtype)
self.ldbody = ldbody << ldbody_unit
self.ob = [-0.974170437, -0.2115201, -0.0917583114] << u.AU
self.sc = np.array([-0.763276255, -0.608633767, -0.216735543])
# From t_atciq in t_erfa_c.c
astrom, eo = erfa_ufunc.apci13(2456165.5, 0.401182685)
self.astrom_unit = u.StructuredUnit(
"yr,AU,1,AU,1,1,1,rad,rad,rad,rad,1,1,1,rad,rad,rad", astrom.dtype
)
self.astrom = astrom << self.astrom_unit
self.rc = 2.71 * u.rad
self.dc = 0.174 * u.rad
self.pr = 1e-5 * u.rad / u.year
self.pd = 5e-6 * u.rad / u.year
self.px = 0.1 * u.arcsec
self.rv = 55.0 * u.km / u.s
def test_ldn_basic(self):
sn = erfa_ufunc.ldn(self.ldbody, self.ob, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_ldn_in_other_unit(self):
ldbody = self.ldbody.to("kg,rad,(m,m/s)")
ob = self.ob.to("m")
sn = erfa_ufunc.ldn(ldbody, ob, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_ldn_in_SI(self):
sn = erfa_ufunc.ldn(self.ldbody.si, self.ob.si, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_aper(self):
along = self.astrom["along"]
astrom2 = erfa_ufunc.aper(10 * u.deg, self.astrom)
assert astrom2["eral"].unit == u.radian
assert_quantity_allclose(astrom2["eral"], along + 10 * u.deg)
astrom3 = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,rad,rad,rad")
astrom4 = erfa_ufunc.aper(10 * u.deg, astrom3)
assert astrom3["eral"].unit == u.rad
assert astrom4["eral"].unit == u.deg
assert astrom4.unit == "s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,rad,rad"
assert_quantity_allclose(astrom4["eral"], along + 10 * u.deg)
def test_atciq_basic(self):
ri, di = erfa_ufunc.atciq(
self.rc, self.dc, self.pr, self.pd, self.px, self.rv, self.astrom
)
assert_quantity_allclose(ri, 2.710121572968696744 * u.rad)
assert_quantity_allclose(di, 0.1729371367219539137 * u.rad)
def test_atciq_in_other_unit(self):
astrom = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg")
ri, di = erfa_ufunc.atciq(
self.rc.to(u.deg),
self.dc.to(u.deg),
self.pr.to(u.mas / u.yr),
self.pd.to(u.mas / u.yr),
self.px,
self.rv.to(u.m / u.s),
astrom,
)
assert_quantity_allclose(ri, 2.710121572968696744 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1729371367219539137 * u.rad, atol=1e-12 * u.rad)
def test_atciqn(self):
ri, di = erfa_ufunc.atciqn(
self.rc.to(u.deg),
self.dc.to(u.deg),
self.pr.to(u.mas / u.yr),
self.pd.to(u.mas / u.yr),
self.px,
self.rv.to(u.m / u.s),
self.astrom.si,
self.ldbody.si,
)
assert_quantity_allclose(ri, 2.710122008104983335 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1729371916492767821 * u.rad, atol=1e-12 * u.rad)
def test_atciqz(self):
ri, di = erfa_ufunc.atciqz(self.rc.to(u.deg), self.dc.to(u.deg), self.astrom.si)
assert_quantity_allclose(ri, 2.709994899247256984 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1728740720984931891 * u.rad, atol=1e-12 * u.rad)
def test_aticq(self):
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
rc, dc = erfa_ufunc.aticq(ri.to(u.deg), di.to(u.deg), self.astrom.si)
assert_quantity_allclose(rc, 2.710126504531716819 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(dc, 0.1740632537627034482 * u.rad, atol=1e-12 * u.rad)
def test_aticqn(self):
ri = 2.709994899247599271 * u.rad
di = 0.1728740720983623469 * u.rad
rc, dc = erfa_ufunc.aticqn(
ri.to(u.deg), di.to(u.deg), self.astrom.si, self.ldbody.si
)
assert_quantity_allclose(rc, 2.709999575033027333 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(dc, 0.1739999656316469990 * u.rad, atol=1e-12 * u.rad)
def test_atioq_atoiq(self):
astrom, _ = erfa_ufunc.apio13(
2456384.5,
0.969254051,
0.1550675,
-0.527800806,
-1.2345856,
2738.0,
2.47230737e-7,
1.82640464e-6,
731.0,
12.8,
0.59,
0.55,
)
astrom = astrom << self.astrom_unit
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
aob, zob, hob, dob, rob = erfa_ufunc.atioq(
ri.to(u.deg), di.to(u.deg), astrom.si
)
assert_quantity_allclose(
aob, 0.9233952224895122499e-1 * u.rad, atol=1e-12 * u.rad
)
assert_quantity_allclose(zob, 1.407758704513549991 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(
hob, -0.9247619879881698140e-1 * u.rad, atol=1e-12 * u.rad
)
assert_quantity_allclose(dob, 0.1717653435756234676 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(rob, 2.710085107988480746 * u.rad, atol=1e-12 * u.rad)
# Sadly does not just use the values from above.
ob1 = 2.710085107986886201 * u.rad
ob2 = 0.1717653435758265198 * u.rad
ri2, di2 = erfa_ufunc.atoiq("R", ob1.to(u.deg), ob2.to(u.deg), astrom.si)
assert_quantity_allclose(ri2, 2.710121574447540810 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(
di2, 0.17293718391166087785 * u.rad, atol=1e-12 * u.rad
)
@pytest.mark.xfail(erfa.__version__ < "2.0.0", reason="comparisons changed")
def test_apio(self):
sp = -3.01974337e-11 * u.rad
theta = 3.14540971 * u.rad
elong = -0.527800806 * u.rad
phi = -1.2345856 * u.rad
hm = 2738.0 * u.m
xp = 2.47230737e-7 * u.rad
yp = 1.82640464e-6 * u.rad
refa = 0.000201418779 * u.rad
refb = -2.36140831e-7 * u.rad
astrom = erfa_ufunc.apio(
sp.to(u.deg), theta, elong, phi, hm.to(u.km), xp, yp, refa, refb
)
assert astrom.unit == self.astrom_unit
for name, value in [
("along", -0.5278008060295995734),
("xpl", 0.1133427418130752958e-5),
("ypl", 0.1453347595780646207e-5),
("sphi", -0.9440115679003211329),
("cphi", 0.3299123514971474711),
("diurab", 0.5135843661699913529e-6),
("eral", 2.617608903970400427),
("refa", 0.2014187790000000000e-3),
("refb", -0.2361408310000000000e-6),
]:
assert_quantity_allclose(
astrom[name],
value * self.astrom_unit[name],
rtol=1e-12,
atol=0 * self.astrom_unit[name],
)
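# Illustrative sketch, not an extra test: the astrom context used above is a
# structured Quantity, so every field carries its own unit and can be inspected
# or converted on its own (field names follow the astrom dtype assumed by this
# test class), e.g.
#
#     astrom = astrom << astrom_unit                       # attach the structured unit
#     astrom["eral"].to(u.deg)                             # convert a single field
#     astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg")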
class TestGeodetic:
def setup_class(self):
self.ellipsoid = 1
self.length_unit = u.Unit("m")
self.equatorial_radius_value = 6378136.0
self.equatorial_radius = self.equatorial_radius_value << self.length_unit
self.flattening = 0.0033528 * u.dimensionless_unscaled
self.lon_value = 0.9827937232473290680
self.lon_unit = u.Unit("rad")
self.lon = self.lon_value << self.lon_unit
self.lat_value = 0.9716018377570411532
self.lat_unit = u.Unit("rad")
self.lat = self.lat_value << self.lat_unit
self.height_value = 332.36862495764397
self.height = self.height_value << self.length_unit
self.x_value = 2e6
self.x = self.x_value << self.length_unit
self.y_value = 3e6
self.y = self.y_value << self.length_unit
self.z_value = 5.244e6
self.z = self.z_value << self.length_unit
self.xyz = np.stack([self.x, self.y, self.z])
def test_unit_errors(self):
"""Test unit errors when dimensionless parameters are used"""
msg = "'NoneType' object has no attribute '_get_converter'"
with pytest.raises(AttributeError, match=msg):
erfa_ufunc.gc2gde(self.equatorial_radius_value, self.flattening, self.xyz)
with pytest.raises(AttributeError, match=msg):
erfa_ufunc.gd2gce(
self.equatorial_radius_value,
self.flattening,
self.lon,
self.lat,
self.height,
)
with pytest.raises(AttributeError, match=msg):
erfa_ufunc.gc2gde(self.equatorial_radius, self.flattening, self.xyz.value)
with pytest.raises(AttributeError, match=msg):
erfa_ufunc.gd2gce(
self.equatorial_radius,
self.flattening,
self.lon_value,
self.lat,
self.height,
)
with pytest.raises(AttributeError, match=msg):
erfa_ufunc.gd2gce(
self.equatorial_radius,
self.flattening,
self.lon,
self.lat,
self.height_value,
)
def test_gc2gde(self):
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
status = 0
e, p, h, status = erfa_ufunc.gc2gde(
self.equatorial_radius, self.flattening, self.xyz
)
vvd(e, self.lon_value, 1e-14, "eraGc2gde", "e", status)
vvd(p, self.lat_value, 1e-14, "eraGc2gde", "p", status)
vvd(h, self.height_value, 1e-8, "eraGc2gde", "h", status)
def test_gd2gce(self):
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
status = 0
xyz, status = erfa_ufunc.gd2gce(
self.equatorial_radius, self.flattening, self.lon, self.lat, self.height
)
vvd(xyz[0], self.x_value, 1e-7, "eraGd2gce", "x", status)
vvd(xyz[1], self.y_value, 1e-7, "eraGd2gce", "y", status)
vvd(xyz[2], self.z_value, 1e-7, "eraGd2gce", "z", status)
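# Minimal usage sketch for the geodetic ufuncs exercised above, reusing the same
# ERFA reference numbers (illustration only, not an additional test case):
#
#     a = 6378136.0 * u.m                                  # equatorial radius
#     f = 0.0033528 * u.dimensionless_unscaled             # flattening
#     xyz = np.array([2e6, 3e6, 5.244e6]) * u.m
#     lon, lat, height, status = erfa_ufunc.gc2gde(a, f, xyz)
#     xyz2, status = erfa_ufunc.gd2gce(a, f, lon, lat, height)   # round-trips xyz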
|
5eef6ad987912baf1623fb25854a5a3ff82b1c8a662c4299f20c7699a6e23cbd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g / u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize("lu_unit, lu_cls", zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize("lu_unit", lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize(
"lu_cls, physical_unit",
itertools.product(lu_subclasses + [u.LogUnit], pu_sample),
)
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit, function_unit=2 * lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2 * lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_lshift_magnitude(self):
mag = 1.0 << u.ABmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.ABmag
assert mag.value == 1.0
# same test for an array, which should produce a view
a2 = np.arange(10.0)
q2 = a2 << u.ABmag
assert isinstance(q2, u.Magnitude)
assert q2.unit == u.ABmag
assert np.all(q2.value == a2)
a2[9] = 0.0
assert np.all(q2.value == a2)
# a different magnitude unit
mag = 10.0 << u.STmag
assert isinstance(mag, u.Magnitude)
assert mag.unit == u.STmag
assert mag.value == 10.0
def test_ilshift_magnitude(self):
# test in-place operation and conversion
mag_fnu_cgs = u.mag(u.erg / u.s / u.cm**2 / u.Hz)
m = np.arange(10.0) * u.mag(u.Jy)
jy = m.physical
m2 = m << mag_fnu_cgs
assert np.all(m2 == m.to(mag_fnu_cgs))
m2 = m
m <<= mag_fnu_cgs
assert m is m2 # Check it was done in-place!
assert np.all(m.value == m2.value)
assert m.unit == mag_fnu_cgs
# Check it works if equivalencies are in-place.
with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
st = jy.to(u.ST)
m <<= u.STmag
assert m is m2
assert_quantity_allclose(m.physical, st)
assert m.unit == u.STmag
def test_lshift_errors(self):
m = np.arange(10.0) * u.mag(u.Jy)
with pytest.raises(u.UnitsError):
m << u.STmag
with pytest.raises(u.UnitsError):
m << u.Jy
with pytest.raises(u.UnitsError):
m <<= u.STmag
with pytest.raises(u.UnitsError):
m <<= u.Jy
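# Sketch of the behaviour covered by the two tests above (uses the module-level
# imports; treat as illustration only): converting between magnitude units of
# different physical types needs an equivalency relating those physical units.
#
#     m = 5.0 * u.mag(u.Jy)
#     m.to(u.STmag)                                        # raises UnitsError
#     with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
#         m_st = m.to(u.STmag)                             # now fine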
def test_predefined_magnitudes():
assert_quantity_allclose(
(-21.1 * u.STmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.AA
)
assert_quantity_allclose(
(-48.6 * u.ABmag).physical, 1.0 * u.erg / u.cm**2 / u.s / u.Hz
)
assert_quantity_allclose((0 * u.M_bol).physical, c.L_bol0)
assert_quantity_allclose(
(0 * u.m_bol).physical, c.L_bol0 / (4.0 * np.pi * (10.0 * c.pc) ** 2)
)
def test_predefined_reinitialisation():
assert u.mag("STflux") == u.STmag
assert u.mag("ABflux") == u.ABmag
assert u.mag("Bol") == u.M_bol
assert u.mag("bol") == u.m_bol
# required for backwards-compatibility, at least unless deprecated
assert u.mag("ST") == u.STmag
assert u.mag("AB") == u.ABmag
def test_predefined_string_roundtrip():
"""Ensure round-tripping; see #5015"""
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regression for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == "mag(Jy)"
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string("generic") == "mag(Jy)"
with pytest.raises(ValueError):
lu1.to_string("fits")
with pytest.raises(ValueError):
lu1.to_string(format="cds")
lu2 = u.dex()
assert str(lu2) == "dex"
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == "dex(1)"
lu3 = u.MagUnit(u.Jy, function_unit=2 * u.mag)
assert str(lu3) == "2 mag(Jy)"
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == "2 mag(Jy)"
lu4 = u.mag(u.ct)
assert lu4.to_string("generic") == "mag(ct)"
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$"
assert lu4.to_string("latex") == latex_str
assert lu4.to_string("latex_inline") == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct / u.s)
assert lu5.to_string("latex") == (
r"$\mathrm{mag}$$\mathrm{\left( " r"\mathrm{\frac{ct}{s}} \right)}$"
)
latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} " r"\right)}$"
assert lu5.to_string("latex_inline") == latex_str
class TestLogUnitConversion:
@pytest.mark.parametrize(
"lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
)
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.0) == 1.0
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.0) == 0.0
pu = u.Unit(8.0 * physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.0) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0.0, atol=1.0e-15)
# Check we round-trip.
value = np.linspace(0.0, 10.0, 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.0e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0.0, 10.0, 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
"flu_unit, tlu_unit, physical_unit",
itertools.product(lu_units, lu_units, pu_sample),
)
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0.0, 10.0, 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(
flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)
)
tlu2 = tlu_unit(u.Unit(100.0 * physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.0e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.0e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(
u.UnitConversionError,
match="Did you perhaps subtract magnitudes so the unit got lost?",
):
(10 * u.ABmag - 2 * u.ABmag).to(u.nJy)
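# Worked illustration of the failure mode above (a sketch, not an extra test):
# subtracting two AB magnitudes leaves a plain, dimensionless ``mag`` quantity,
# i.e. a flux ratio expressed in magnitudes, so it can no longer be converted to
# a flux density such as nJy.
#
#     dm = 10 * u.ABmag - 2 * u.ABmag      # unit is mag(), physical type dimensionless
#     dm.physical                          # ~10**(-8 / 2.5), a pure ratio
#     dm.to(u.nJy)                         # raises UnitConversionError with the hint above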
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(
t.to(u.dimensionless_unscaled, np.arange(3.0) / 100.0),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure nonsensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1**power == u.dimensionless_unscaled
elif power == 1:
assert lu1**power == lu1
else:
with pytest.raises(u.UnitsError):
lu1**power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t ** (1.0 / power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(
t2.to(u.dimensionless_unscaled, np.arange(3.0)),
lu2.to(lu2.physical_unit, np.arange(3.0)),
)
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.0
with pytest.raises(TypeError):
lu1 - [1.0, 2.0, 3.0]
@pytest.mark.parametrize(
"other",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, "physical_unit", u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg / u.s / u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
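# Sketch of the addition/subtraction rule exercised in this class: adding
# logarithmic units multiplies their physical units, while subtracting divides
# them (illustrative, relying on the module-level import of u):
#
#     (u.mag(u.Jy) + u.mag(u.s)).physical_unit.is_equivalent(u.Jy * u.s)   # True
#     (u.mag(u.Jy) - u.mag(u.s)).physical_unit.is_equivalent(u.Jy / u.s)   # True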
def test_pickle():
lu1 = u.dex(u.cm / u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize(
"lq, lu", zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])
)
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.0)) is lq
@pytest.mark.parametrize(
"lq_cls, physical_unit", itertools.product(lq_subclasses, pu_sample)
)
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1.0, 10.0)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
"unit",
(
u.mag,
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.Unit(2 * u.mag),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, "function_unit", unit)
assert q.unit.physical_unit is getattr(
unit, "physical_unit", u.dimensionless_unscaled
)
@pytest.mark.parametrize(
"value, unit",
(
(1.0 * u.mag(u.Jy), None),
(1.0 * u.dex(u.Jy), None),
(1.0 * u.mag(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
(1.0 * u.dex(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
),
)
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(
unit, "physical_unit", value.unit.physical_unit
)
@pytest.mark.parametrize(
"unit",
(
u.mag(),
u.mag(u.Jy),
u.mag(u.m),
u.MagUnit("", 2.0 * u.mag),
u.MagUnit(u.Jy, -1 * u.mag),
u.MagUnit(u.m, -2.0 * u.mag),
),
)
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100.0 * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.0
assert (q2._function_view / u.mag).to_value(1) == -5.0
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100.0, 1000.0] * u.cm / u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2.0, 3.0] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1.0, lu)
q = u.Quantity(1.0, lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10.0, 12.0, 14.0] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.0 * u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup_method(self):
self.lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
self.lq2 = u.Magnitude(np.arange(1.0, 5.0))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.0
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2.0 * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
assert lq1[9] == u.Magnitude(10.0 * u.Jy)
lq1[2] = 100.0 * u.Jy
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.0 * u.m)
assert lq1[2] == u.Magnitude(100.0 * u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
lq1[2:4] = 100.0 * u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.0 * u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.0 * u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.0 * u.m)
assert np.all(lq1[2] == u.Magnitude(100.0 * u.Jy))
class TestLogQuantityArithmetic:
@pytest.mark.parametrize(
"other",
[
2.4 * u.mag(),
12.34 * u.ABmag,
u.Magnitude(3.45 * u.Jy),
u.Dex(3.0),
u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
u.Magnitude(6.78, 2.0 * u.mag),
],
)
@pytest.mark.parametrize("fac", [1.0, 2, 0.4])
def test_multiplication_division(self, other, fac):
"""Check that multiplication and division work as expected"""
lq_sf = fac * other
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other * fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other / fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
lq_sf = other.copy()
lq_sf *= fac
assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
assert_allclose(lq_sf.physical, other.physical**fac)
lq_sf = other.copy()
lq_sf /= fac
assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
assert_allclose(lq_sf.physical**fac, other.physical)
def test_more_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this keeps
the result as a LogQuantity if possible."""
lq = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.0 * u.m)
with pytest.raises(u.UnitsError):
(1.0 * u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.0))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
lq_sf = lq.copy()
with pytest.raises(u.UnitsError):
lq_sf *= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
with pytest.raises(u.UnitsError):
lq_sf /= lq2
# ensure that nothing changed inside
assert (lq_sf == lq).all()
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.0)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value / 2.0)
# And multiplying with a dimensionless array is also OK.
r2 = lq2 * np.arange(10.0)
assert isinstance(r2, u.Magnitude)
assert np.all(r2 == lq2._function_view * np.arange(10.0))
# with dimensionless, normal units OK, but return normal quantities
# if the unit no longer is consistent with the logarithmic unit.
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.0 * u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view * 2)
@pytest.mark.parametrize("power", (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
if power == 0:
assert np.all(lq**power == 1.0)
elif power == 1:
assert np.all(lq**power == lq)
else:
with pytest.raises(u.UnitsError):
lq**power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.0))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.0)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit**power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
with pytest.raises(TypeError):
lq**lq
@pytest.mark.parametrize("other", pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize("other", pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1.0, 10.0), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1.0, 10.0))
assert lq1.unit == lu1
@pytest.mark.parametrize(
"other",
(
1.23 * u.mag,
2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy),
u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2 * u.mag),
u.Magnitude(6.78, 2.0 * u.mag),
),
)
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
other_physical = other.to(
getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
equivalencies=u.logarithmic(),
)
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
DMmag = u.mag(dm0)
m_st = 10.0 * u.STmag
dm = 5.0 * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg / u.s / u.AA)
ratio = M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2)
assert np.abs(ratio - 1.0) < 1.0e-15
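# Worked numbers for the distance-modulus check above (illustrative): dm = 5 mag
# of DM corresponds to a distance of 10**(5/5) * 10 pc = 100 pc, so the absolute
# flux M_st.physical should equal m_st.physical scaled by 4*pi*(100 pc)**2, which
# is exactly the ratio the final assertion verifies to within rounding error.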
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
with pytest.raises(TypeError):
lq > "a" # noqa: B015
assert not (lq == "a")
assert lq != "a"
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
lq2 = u.Magnitude(2.0 * u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.0 * u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.0 * u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4 # noqa: B015
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.0 * u.m # noqa: B015
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag # noqa: B015
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1.0, 4.0))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.0 * u.m # noqa: B015
class TestLogQuantityMethods:
def setup_method(self):
self.mJy = np.arange(1.0, 5.0).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1.0, 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize(
"method",
(
"mean",
"min",
"max",
"round",
"trace",
"std",
"var",
"ptp",
"diff",
"ediff1d",
),
)
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value == getattr(mag._function_view, method)().value)
if method in ("std", "ptp", "diff", "ediff1d"):
assert res.unit == u.mag()
elif method == "var":
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(
mag.clip(2.0 * mag.unit, 4.0 * mag.unit).value
== mag.value.clip(2.0, 4.0)
)
@pytest.mark.parametrize("method", ("sum", "cumsum"))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value == getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize("method", ("prod", "cumprod"))
def test_never_ok(self, method):
with pytest.raises(TypeError):
getattr(self.mJy, method)()
with pytest.raises(TypeError):
getattr(self.m1, method)()
|
0a8756b899bbf670d3a8ac7af696241a1b22029e4e774f7fa9c356c727ecc9d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities.
"""
import copy
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import check_pickling_recovery, pickle_protocol # noqa: F401
from astropy.units import Quantity, StructuredUnit, Unit, UnitBase
from astropy.units.quantity import _structured_unit_like_dtype
from astropy.utils.compat import NUMPY_LT_1_21_1
from astropy.utils.masked import Masked
class StructuredTestBase:
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype([("pv", self.pv_dtype), ("t", "f8")])
self.p_unit = u.km
self.v_unit = u.km / u.s
self.t_unit = u.s
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype([("pv", self.pv_dtype), ("t", "f8")])
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[
((4.0, 2.5), 0.0),
((5.0, 5.0), 1.0),
((6.0, 7.5), 2.0),
],
self.pv_t_dtype,
)
class StructuredTestBaseWithUnits(StructuredTestBase):
@classmethod
def setup_class(self):
super().setup_class()
self.pv_unit = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
self.pv_t_unit = StructuredUnit((self.pv_unit, self.t_unit), ("pv", "t"))
class TestStructuredUnitBasics(StructuredTestBase):
def test_initialization_and_keying(self):
su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
assert su["p"] is self.p_unit
assert su["v"] is self.v_unit
su2 = StructuredUnit((su, self.t_unit), ("pv", "t"))
assert isinstance(su2["pv"], StructuredUnit)
assert su2["pv"]["p"] is self.p_unit
assert su2["pv"]["v"] is self.v_unit
assert su2["t"] is self.t_unit
assert su2["pv"] == su
su3 = StructuredUnit(("AU", "AU/day"), ("p", "v"))
assert isinstance(su3["p"], UnitBase)
assert isinstance(su3["v"], UnitBase)
su4 = StructuredUnit("AU, AU/day", ("p", "v"))
assert su4["p"] == u.AU
assert su4["v"] == u.AU / u.day
su5 = StructuredUnit(("AU", "AU/day"))
assert su5.field_names == ("f0", "f1")
assert su5["f0"] == u.AU
assert su5["f1"] == u.AU / u.day
def test_recursive_initialization(self):
su = StructuredUnit(
((self.p_unit, self.v_unit), self.t_unit), (("p", "v"), "t")
)
assert isinstance(su["pv"], StructuredUnit)
assert su["pv"]["p"] is self.p_unit
assert su["pv"]["v"] is self.v_unit
assert su["t"] is self.t_unit
su2 = StructuredUnit(
((self.p_unit, self.v_unit), self.t_unit), (["p_v", ("p", "v")], "t")
)
assert isinstance(su2["p_v"], StructuredUnit)
assert su2["p_v"]["p"] is self.p_unit
assert su2["p_v"]["v"] is self.v_unit
assert su2["t"] is self.t_unit
su3 = StructuredUnit((("AU", "AU/day"), "yr"), (["p_v", ("p", "v")], "t"))
assert isinstance(su3["p_v"], StructuredUnit)
assert su3["p_v"]["p"] == u.AU
assert su3["p_v"]["v"] == u.AU / u.day
assert su3["t"] == u.yr
su4 = StructuredUnit("(AU, AU/day), yr", (("p", "v"), "t"))
assert isinstance(su4["pv"], StructuredUnit)
assert su4["pv"]["p"] == u.AU
assert su4["pv"]["v"] == u.AU / u.day
assert su4["t"] == u.yr
def test_extreme_recursive_initialization(self):
su = StructuredUnit(
"(yr,(AU,AU/day,(km,(day,day))),m)",
("t", ("p", "v", ("h", ("d1", "d2"))), "l"),
)
assert su.field_names == (
't', ['pvhd1d2',
('p', 'v',
['hd1d2',
('h',
['d1d2',
('d1', 'd2')])])],
'l',
) # fmt: skip
dt = np.dtype(
[("t", "f8"),
("pvhd1d2",
([("p", "f8"), ("v", "f8"), ("hd1d2",
[("h", "f8"), ("d1d2",
[("d1", "f8"), ("d2", "f8")]),
]),
], (5, 5))), # Note: structured subarray to improve test!
("l", "f8")
]) # fmt: skip
su2 = StructuredUnit("(yr,(AU,AU/day,(km,(day,day))),m)", dt)
assert su2.field_names == su.field_names
assert su2 == su
@pytest.mark.parametrize(
"names, invalid",
[
[("t", ["p", "v"]), "['p', 'v']"],
[("t", ["pv", "p", "v"]), "['pv', 'p', 'v']"],
[("t", ["pv", ["p", "v"]]), "['pv', ['p', 'v']"],
[("t", ()), "()"],
[("t", ("p", None)), "None"],
[("t", ["pv", ("p", "")]), "''"],
],
)
def test_initialization_names_invalid_list_errors(self, names, invalid):
with pytest.raises(ValueError) as exc:
StructuredUnit("yr,(AU,AU/day)", names)
assert f"invalid entry {invalid}" in str(exc)
def test_looks_like_unit(self):
su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
assert Unit(su) is su
def test_initialize_with_float_dtype(self):
su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
assert isinstance(su["p"], UnitBase)
assert isinstance(su["v"], UnitBase)
assert su["p"] == u.AU
assert su["v"] == u.AU / u.day
su = StructuredUnit((("km", "km/s"), "yr"), self.pv_t_dtype)
assert isinstance(su["pv"], StructuredUnit)
assert isinstance(su["pv"]["p"], UnitBase)
assert isinstance(su["t"], UnitBase)
assert su["pv"]["v"] == u.km / u.s
su = StructuredUnit("(km, km/s), yr", self.pv_t_dtype)
assert isinstance(su["pv"], StructuredUnit)
assert isinstance(su["pv"]["p"], UnitBase)
assert isinstance(su["t"], UnitBase)
assert su["pv"]["v"] == u.km / u.s
def test_initialize_with_structured_unit_for_names(self):
su = StructuredUnit(("AU", "AU/d"), names=("p", "v"))
su2 = StructuredUnit(("km", "km/s"), names=su)
assert su2.field_names == ("p", "v")
assert su2["p"] == u.km
assert su2["v"] == u.km / u.s
def test_initialize_single_field(self):
su = StructuredUnit("AU", "p")
assert isinstance(su, StructuredUnit)
assert isinstance(su["p"], UnitBase)
assert su["p"] == u.AU
su = StructuredUnit("AU")
assert isinstance(su, StructuredUnit)
assert isinstance(su["f0"], UnitBase)
assert su["f0"] == u.AU
def test_equality(self):
su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
assert su == StructuredUnit(("AU", "AU/d"), self.pv_dtype)
assert su != StructuredUnit(("m", "AU/d"), self.pv_dtype)
# Names should be ignored.
assert su == StructuredUnit(("AU", "AU/d"))
assert su == StructuredUnit(("AU", "AU/d"), names=("q", "w"))
assert su != StructuredUnit(("m", "m/s"))
def test_parsing(self):
su = Unit("AU, AU/d")
assert isinstance(su, StructuredUnit)
assert isinstance(su["f0"], UnitBase)
assert isinstance(su["f1"], UnitBase)
assert su["f0"] == u.AU
assert su["f1"] == u.AU / u.day
su2 = Unit("AU, AU/d, yr")
assert isinstance(su2, StructuredUnit)
assert su2 == StructuredUnit(("AU", "AU/d", "yr"))
su2a = Unit("(AU, AU/d, yr)")
assert isinstance(su2a, StructuredUnit)
assert su2a == su2
su3 = Unit("(km, km/s), yr")
assert isinstance(su3, StructuredUnit)
assert su3 == StructuredUnit((("km", "km/s"), "yr"))
su4 = Unit("km,")
assert isinstance(su4, StructuredUnit)
assert su4 == StructuredUnit((u.km,))
su5 = Unit("(m,s),")
assert isinstance(su5, StructuredUnit)
assert su5 == StructuredUnit(((u.m, u.s),))
ldbody_unit = Unit("Msun, 0.5rad^2, (au, au/day)")
assert ldbody_unit == StructuredUnit(
(u.Msun, Unit(u.rad**2 / 2), (u.AU, u.AU / u.day))
)
def test_to_string(self):
su = StructuredUnit((u.km, u.km / u.s))
latex_str = r"$(\mathrm{km}, \mathrm{\frac{km}{s}})$"
assert su.to_string(format="latex") == latex_str
latex_str = r"$(\mathrm{km}, \mathrm{km\,s^{-1}})$"
assert su.to_string(format="latex_inline") == latex_str
def test_str(self):
su = StructuredUnit(((u.km, u.km / u.s), u.yr))
assert str(su) == "((km, km / s), yr)"
assert Unit(str(su)) == su
def test_repr(self):
su = StructuredUnit(((u.km, u.km / u.s), u.yr))
assert repr(su) == 'Unit("((km, km / s), yr)")'
assert eval(repr(su)) == su
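# A minimal, self-contained sketch of the constructions tested above. It is kept
# outside the test classes and is not collected by pytest; all names are
# illustrative only.
def _example_structured_unit_usage():
    pv_unit = StructuredUnit((u.km, u.km / u.s), ("p", "v"))
    pv = np.array([(1.0, 0.25), (2.0, 0.5)], dtype=[("p", "f8"), ("v", "f8")])
    q = pv << pv_unit  # attach the structured unit without copying the data
    return q["v"].to(u.m / u.s)  # per-field access yields a regular Quantity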
class TestStructuredUnitsCopyPickle(StructuredTestBaseWithUnits):
def test_copy(self):
su_copy = copy.copy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is self.pv_t_unit._units
def test_deepcopy(self):
su_copy = copy.deepcopy(self.pv_t_unit)
assert su_copy is not self.pv_t_unit
assert su_copy == self.pv_t_unit
assert su_copy._units is not self.pv_t_unit._units
@pytest.mark.skipif(NUMPY_LT_1_21_1, reason="https://stackoverflow.com/q/69571643")
def test_pickle(self, pickle_protocol): # noqa: F811
check_pickling_recovery(self.pv_t_unit, pickle_protocol)
class TestStructuredUnitAsMapping(StructuredTestBaseWithUnits):
def test_len(self):
assert len(self.pv_unit) == 2
assert len(self.pv_t_unit) == 2
def test_keys(self):
slv = list(self.pv_t_unit.keys())
assert slv == ["pv", "t"]
def test_values(self):
values = self.pv_t_unit.values()
assert values == (self.pv_unit, self.t_unit)
def test_field_names(self):
field_names = self.pv_t_unit.field_names
assert isinstance(field_names, tuple)
assert field_names == (["pv", ("p", "v")], "t")
@pytest.mark.parametrize("iterable", [list, set])
def test_as_iterable(self, iterable):
sl = iterable(self.pv_unit)
assert isinstance(sl, iterable)
assert sl == iterable(["p", "v"])
def test_as_dict(self):
sd = dict(self.pv_t_unit)
assert sd == {"pv": self.pv_unit, "t": self.t_unit}
def test_contains(self):
assert "p" in self.pv_unit
assert "v" in self.pv_unit
assert "t" not in self.pv_unit
def test_setitem_fails(self):
with pytest.raises(TypeError, match="item assignment"):
self.pv_t_unit["t"] = u.Gyr
class TestStructuredUnitMethods(StructuredTestBaseWithUnits):
def test_physical_type_id(self):
pv_ptid = self.pv_unit._get_physical_type_id()
assert len(pv_ptid) == 2
assert pv_ptid.dtype.names == ("p", "v")
p_ptid = self.pv_unit["p"]._get_physical_type_id()
v_ptid = self.pv_unit["v"]._get_physical_type_id()
# Expected should be (subclass of) void, with structured object dtype.
expected = np.array((p_ptid, v_ptid), [("p", "O"), ("v", "O")])[()]
assert pv_ptid == expected
# Names should be ignored in comparison.
assert pv_ptid == np.array((p_ptid, v_ptid), "O,O")[()]
# Should be possible to address by field and by number.
assert pv_ptid["p"] == p_ptid
assert pv_ptid["v"] == v_ptid
assert pv_ptid[0] == p_ptid
assert pv_ptid[1] == v_ptid
# More complicated version.
pv_t_ptid = self.pv_t_unit._get_physical_type_id()
t_ptid = self.t_unit._get_physical_type_id()
assert pv_t_ptid == np.array((pv_ptid, t_ptid), "O,O")[()]
assert pv_t_ptid["pv"] == pv_ptid
assert pv_t_ptid["t"] == t_ptid
assert pv_t_ptid["pv"][1] == v_ptid
def test_physical_type(self):
pv_pt = self.pv_unit.physical_type
assert pv_pt == np.array(("length", "speed"), "O,O")[()]
pv_t_pt = self.pv_t_unit.physical_type
assert pv_t_pt == np.array((pv_pt, "time"), "O,O")[()]
def test_si(self):
pv_t_si = self.pv_t_unit.si
assert pv_t_si == self.pv_t_unit
assert pv_t_si["pv"]["v"].scale == 1000
def test_cgs(self):
pv_t_cgs = self.pv_t_unit.cgs
assert pv_t_cgs == self.pv_t_unit
assert pv_t_cgs["pv"]["v"].scale == 100000
def test_decompose(self):
pv_t_decompose = self.pv_t_unit.decompose()
assert pv_t_decompose["pv"]["v"].scale == 1000
def test_is_equivalent(self):
assert self.pv_unit.is_equivalent(("AU", "AU/day"))
assert not self.pv_unit.is_equivalent("m")
assert not self.pv_unit.is_equivalent(("AU", "AU"))
# Names should be ignored.
pv_alt = StructuredUnit("m,m/s", names=("q", "w"))
assert pv_alt.field_names != self.pv_unit.field_names
assert self.pv_unit.is_equivalent(pv_alt)
# Regular units should work too.
assert not u.m.is_equivalent(self.pv_unit)
def test_conversion(self):
pv1 = self.pv_unit.to(("AU", "AU/day"), self.pv)
assert isinstance(pv1, np.ndarray)
assert pv1.dtype == self.pv.dtype
assert np.all(pv1["p"] * u.AU == self.pv["p"] * self.p_unit)
assert np.all(pv1["v"] * u.AU / u.day == self.pv["v"] * self.v_unit)
# Names should be from value.
su2 = StructuredUnit((self.p_unit, self.v_unit), ("position", "velocity"))
pv2 = su2.to(("Mm", "mm/s"), self.pv)
assert pv2.dtype.names == ("p", "v")
assert pv2.dtype == self.pv.dtype
# Check recursion.
pv_t1 = self.pv_t_unit.to((("AU", "AU/day"), "Myr"), self.pv_t)
assert isinstance(pv_t1, np.ndarray)
assert pv_t1.dtype == self.pv_t.dtype
assert np.all(pv_t1["pv"]["p"] * u.AU == self.pv_t["pv"]["p"] * self.p_unit)
assert np.all(
pv_t1["pv"]["v"] * u.AU / u.day == self.pv_t["pv"]["v"] * self.v_unit
)
assert np.all(pv_t1["t"] * u.Myr == self.pv_t["t"] * self.t_unit)
# Passing in tuples should work.
pv_t2 = self.pv_t_unit.to((("AU", "AU/day"), "Myr"), ((1.0, 0.1), 10.0))
assert pv_t2["pv"]["p"] == self.p_unit.to("AU", 1.0)
assert pv_t2["pv"]["v"] == self.v_unit.to("AU/day", 0.1)
assert pv_t2["t"] == self.t_unit.to("Myr", 10.0)
pv_t3 = self.pv_t_unit.to(
(("AU", "AU/day"), "Myr"), [((1.0, 0.1), 10.0), ((2.0, 0.2), 20.0)]
)
assert np.all(pv_t3["pv"]["p"] == self.p_unit.to("AU", [1.0, 2.0]))
assert np.all(pv_t3["pv"]["v"] == self.v_unit.to("AU/day", [0.1, 0.2]))
assert np.all(pv_t3["t"] == self.t_unit.to("Myr", [10.0, 20.0]))
class TestStructuredUnitArithmetic(StructuredTestBaseWithUnits):
def test_multiplication(self):
pv_times_au = self.pv_unit * u.au
assert isinstance(pv_times_au, StructuredUnit)
assert pv_times_au.field_names == ("p", "v")
assert pv_times_au["p"] == self.p_unit * u.AU
assert pv_times_au["v"] == self.v_unit * u.AU
au_times_pv = u.au * self.pv_unit
assert au_times_pv == pv_times_au
pv_times_au2 = self.pv_unit * "au"
assert pv_times_au2 == pv_times_au
au_times_pv2 = "AU" * self.pv_unit
assert au_times_pv2 == pv_times_au
with pytest.raises(TypeError):
self.pv_unit * self.pv_unit
with pytest.raises(TypeError):
"s,s" * self.pv_unit
def test_division(self):
pv_by_s = self.pv_unit / u.s
assert isinstance(pv_by_s, StructuredUnit)
assert pv_by_s.field_names == ("p", "v")
assert pv_by_s["p"] == self.p_unit / u.s
assert pv_by_s["v"] == self.v_unit / u.s
pv_by_s2 = self.pv_unit / "s"
assert pv_by_s2 == pv_by_s
with pytest.raises(TypeError):
1.0 / self.pv_unit
with pytest.raises(TypeError):
u.s / self.pv_unit
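# Sketch of the arithmetic rules verified above: a StructuredUnit can be scaled
# field-wise by an ordinary unit, but combining two structured units is not
# defined (illustrative, using the units from the setup classes above):
#
#     (pv_unit * u.au)["p"] == u.km * u.au        # True, applied to every field
#     (pv_unit / u.s)["v"]  == u.km / u.s / u.s   # True
#     pv_unit * pv_unit                            # raises TypeError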
class TestStructuredQuantity(StructuredTestBaseWithUnits):
def test_initialization_and_keying(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_p = q_pv["p"]
assert isinstance(q_p, Quantity)
assert isinstance(q_p.unit, UnitBase)
assert np.all(q_p == self.pv["p"] * self.pv_unit["p"])
q_v = q_pv["v"]
assert isinstance(q_v, Quantity)
assert isinstance(q_v.unit, UnitBase)
assert np.all(q_v == self.pv["v"] * self.pv_unit["v"])
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_t = q_pv_t["t"]
assert np.all(q_t == self.pv_t["t"] * self.pv_t_unit["t"])
q_pv2 = q_pv_t["pv"]
assert isinstance(q_pv2, Quantity)
assert q_pv2.unit == self.pv_unit
with pytest.raises(ValueError):
Quantity(self.pv, self.pv_t_unit)
with pytest.raises(ValueError):
Quantity(self.pv_t, self.pv_unit)
def test_initialization_with_unit_tuples(self):
q_pv_t = Quantity(self.pv_t, (("km", "km/s"), "s"))
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_with_string(self):
q_pv_t = Quantity(self.pv_t, "(km, km/s), s")
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_by_multiplication_with_unit(self):
q_pv_t = self.pv_t * self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert not np.may_share_memory(q_pv_t, self.pv_t)
q_pv_t2 = self.pv_t_unit * self.pv_t
assert q_pv_t.unit is self.pv_t_unit
# Not testing equality of structured Quantity here.
assert np.all(q_pv_t2.value == q_pv_t.value)
def test_initialization_by_shifting_to_unit(self):
q_pv_t = self.pv_t << self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert np.may_share_memory(q_pv_t, self.pv_t)
def test_initialization_without_unit(self):
q_pv_t = u.Quantity(self.pv_t, unit=None)
assert np.all(q_pv_t.value == self.pv_t)
# Test that unit is a structured unit like the dtype
expected_unit = _structured_unit_like_dtype(
u.Quantity._default_unit, self.pv_t.dtype
)
assert q_pv_t.unit == expected_unit
# A more explicit test
assert q_pv_t.unit == u.StructuredUnit(((u.one, u.one), u.one))
def test_getitem(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t01 = q_pv_t[:2]
assert isinstance(q_pv_t01, Quantity)
assert q_pv_t01.unit == q_pv_t.unit
assert np.all(q_pv_t01["t"] == q_pv_t["t"][:2])
q_pv_t1 = q_pv_t[1]
assert isinstance(q_pv_t1, Quantity)
assert q_pv_t1.unit == q_pv_t.unit
assert q_pv_t1.shape == ()
assert q_pv_t1["t"] == q_pv_t["t"][1]
def test_value(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
value = q_pv_t.value
assert type(value) is np.ndarray
assert np.all(value == self.pv_t)
value1 = q_pv_t[1].value
assert type(value1) is np.void
assert np.all(value1 == self.pv_t[1])
def test_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.to(("AU", "AU/day"))
assert isinstance(q1, Quantity)
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q2 = q_pv.to(self.pv_unit)
assert q2["p"].unit == self.p_unit
assert q2["v"].unit == self.v_unit
assert np.all(q2["p"].value == self.pv["p"])
assert np.all(q2["v"].value == self.pv["v"])
assert not np.may_share_memory(q2, q_pv)
pv1 = q_pv.to_value(("AU", "AU/day"))
assert type(pv1) is np.ndarray
assert np.all(pv1["p"] == q_pv["p"].to_value(u.AU))
assert np.all(pv1["v"] == q_pv["v"].to_value(u.AU / u.day))
pv11 = q_pv[1].to_value(("AU", "AU/day"))
assert type(pv11) is np.void
assert pv11 == pv1[1]
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.to((("kpc", "kpc/Myr"), "Myr"))
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_conversion_via_lshift(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv << StructuredUnit(("AU", "AU/day"))
assert isinstance(q1, Quantity)
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q2 = q_pv << self.pv_unit
assert q2["p"].unit == self.p_unit
assert q2["v"].unit == self.v_unit
assert np.all(q2["p"].value == self.pv["p"])
assert np.all(q2["v"].value == self.pv["v"])
assert np.may_share_memory(q2, q_pv)
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t << "(kpc,kpc/Myr),Myr"
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_inplace_conversion(self):
# In principle, in-place might be possible, in which case this should be
# changed -- ie ``q1 is q_link``.
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.copy()
q_link = q1
q1 <<= StructuredUnit(("AU", "AU/day"))
assert q1 is not q_link
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.copy()
q_link = q2
q2 <<= "(kpc,kpc/Myr),Myr"
assert q2 is not q_link
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_si(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_si = q_pv_t.si
assert_array_equal(q_pv_t_si, q_pv_t.to("(m,m/s),s"))
def test_cgs(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_cgs = q_pv_t.cgs
assert_array_equal(q_pv_t_cgs, q_pv_t.to("(cm,cm/s),s"))
def test_equality(self):
q_pv = Quantity(self.pv, self.pv_unit)
equal = q_pv == q_pv
not_equal = q_pv != q_pv
assert np.all(equal)
assert not np.any(not_equal)
equal2 = q_pv == q_pv[1]
not_equal2 = q_pv != q_pv[1]
assert np.all(equal2 == [False, True, False])
assert np.all(not_equal2 != equal2)
q1 = q_pv.to(("AU", "AU/day"))
# Ensure same conversion is done, by placing q1 first.
assert np.all(q1 == q_pv)
assert not np.any(q1 != q_pv)
# Check different names in dtype.
assert np.all(q1.value * u.Unit("AU, AU/day") == q_pv)
assert not np.any(q1.value * u.Unit("AU, AU/day") != q_pv)
assert (q_pv == "b") is False
assert ("b" != q_pv) is True
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
assert np.all((q_pv_t[2] == q_pv_t) == [False, False, True])
assert np.all((q_pv_t[2] != q_pv_t) != [False, False, True])
assert (q_pv == q_pv_t) is False
assert (q_pv_t != q_pv) is True
def test_setitem(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_pv[1] = (2.0, 2.0) * self.pv_unit
assert q_pv[1].value == np.array((2.0, 2.0), self.pv_dtype)
q_pv[1:2] = (1.0, 0.5) * u.Unit("AU, AU/day")
assert q_pv["p"][1] == 1.0 * u.AU
assert q_pv["v"][1] == 0.5 * u.AU / u.day
q_pv["v"] = 1.0 * u.km / u.s
assert np.all(q_pv["v"] == 1.0 * u.km / u.s)
with pytest.raises(u.UnitsError):
q_pv[1] = (1.0, 1.0) * u.Unit("AU, AU")
with pytest.raises(u.UnitsError):
q_pv["v"] = 1.0 * u.km
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t[1] = ((2.0, 2.0), 3.0) * self.pv_t_unit
assert q_pv_t[1].value == np.array(((2.0, 2.0), 3.0), self.pv_t_dtype)
q_pv_t[1:2] = ((1.0, 0.5), 5.0) * u.Unit("(AU, AU/day), yr")
assert q_pv_t["pv"][1] == (1.0, 0.5) * u.Unit("AU, AU/day")
assert q_pv_t["t"][1] == 5.0 * u.yr
q_pv_t["pv"] = (1.0, 0.5) * self.pv_unit
assert np.all(q_pv_t["pv"] == (1.0, 0.5) * self.pv_unit)
class TestStructuredQuantityFunctions(StructuredTestBaseWithUnits):
@classmethod
def setup_class(self):
super().setup_class()
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_empty_like(self):
z = np.empty_like(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
@pytest.mark.parametrize("func", [np.zeros_like, np.ones_like])
def test_zeros_ones_like(self, func):
z = func(self.q_pv)
assert z.dtype == self.pv_dtype
assert z.unit == self.pv_unit
assert z.shape == self.pv.shape
assert_array_equal(z, func(self.pv) << self.pv_unit)
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'km / s'"):
rfn.structured_to_unstructured(self.q_pv)
# For the other tests of ``structured_to_unstructured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
# can't structure something that's already structured
dtype = np.dtype([("f1", float), ("f2", float)])
with pytest.raises(ValueError, match="The length of the last dimension"):
rfn.unstructured_to_structured(self.q_pv, dtype=self.q_pv.dtype)
# For the other tests of ``unstructured_to_structured``, see
# ``test_quantity_non_ufuncs.TestRecFunctions.test_unstructured_to_structured``
class TestStructuredSpecificTypeQuantity(StructuredTestBaseWithUnits):
def setup_class(self):
super().setup_class()
class PositionVelocity(u.SpecificTypeQuantity):
_equivalent_unit = self.pv_unit
self.PositionVelocity = PositionVelocity
def test_init(self):
pv = self.PositionVelocity(self.pv, self.pv_unit)
assert isinstance(pv, self.PositionVelocity)
assert type(pv["p"]) is u.Quantity
assert_array_equal(pv["p"], self.pv["p"] << self.pv_unit["p"])
pv2 = self.PositionVelocity(self.pv, "AU,AU/day")
assert_array_equal(pv2["p"], self.pv["p"] << u.AU)
def test_error_on_non_equivalent_unit(self):
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, "AU")
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, "AU,yr")
class TestStructuredLogUnit:
def setup_class(self):
self.mag_time_dtype = np.dtype([("mag", "f8"), ("t", "f8")])
self.mag_time = np.array([(20.0, 10.0), (25.0, 100.0)], self.mag_time_dtype)
def test_unit_initialization(self):
mag_time_unit = StructuredUnit((u.STmag, u.s), self.mag_time_dtype)
assert mag_time_unit["mag"] == u.STmag
assert mag_time_unit["t"] == u.s
mag_time_unit2 = u.Unit("mag(ST),s")
assert mag_time_unit2 == mag_time_unit
def test_quantity_initialization(self):
su = u.Unit("mag(ST),s")
mag_time = self.mag_time << su
assert isinstance(mag_time["mag"], u.Magnitude)
assert isinstance(mag_time["t"], u.Quantity)
assert mag_time.unit == su
assert_array_equal(mag_time["mag"], self.mag_time["mag"] << u.STmag)
assert_array_equal(mag_time["t"], self.mag_time["t"] << u.s)
def test_quantity_si(self):
mag_time = self.mag_time << u.Unit("mag(ST),yr")
mag_time_si = mag_time.si
assert_array_equal(mag_time_si["mag"], mag_time["mag"].si)
assert_array_equal(mag_time_si["t"], mag_time["t"].si)
class TestStructuredMaskedQuantity(StructuredTestBaseWithUnits):
"""Somewhat minimal tests. Conversion is most stringent."""
def setup_class(self):
super().setup_class()
self.qpv = self.pv << self.pv_unit
self.pv_mask = np.array(
[
(True, False),
(False, False),
(False, True),
],
[("p", bool), ("v", bool)],
)
self.mpv = Masked(self.qpv, mask=self.pv_mask)
def test_init(self):
assert isinstance(self.mpv, Masked)
assert isinstance(self.mpv, Quantity)
assert_array_equal(self.mpv.unmasked, self.qpv)
assert_array_equal(self.mpv.mask, self.pv_mask)
def test_slicing(self):
mp = self.mpv["p"]
assert isinstance(mp, Masked)
assert isinstance(mp, Quantity)
assert_array_equal(mp.unmasked, self.qpv["p"])
assert_array_equal(mp.mask, self.pv_mask["p"])
def test_conversion(self):
mpv = self.mpv.to("AU,AU/day")
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.to("AU,AU/day"))
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
def test_si(self):
mpv = self.mpv.si
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.si)
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are in modules we care about and have been overridden.
def get_wrapped_functions(*modules):
if NUMPY_LT_1_25:
def allows_array_function_override(f):
return (
hasattr(f, "__wrapped__")
and f is not np.printoptions
and not f.__name__.startswith("_")
)
else:
from numpy.testing.overrides import allows_array_function_override
return {
name: f
for mod in modules
for name, f in mod.__dict__.items()
if callable(f) and allows_array_function_override(f)
}
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
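# For illustration (an assumption about the installed numpy, not something the
# tests assert directly): the mapping is keyed by plain attribute name, e.g.
#     all_wrapped_functions["concatenate"] is np.concatenate
#     np.reshape in all_wrapped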
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
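# Illustrative sketch (an assumption for exposition, not part of the suite):
#
#     class MyTests(metaclass=CoverageMeta):
#         def test_reshape(self):
#             ...
#
# Defining ``test_reshape`` adds ``np.reshape`` to ``CoverageMeta.covered``,
# which can then be compared against ``all_wrapped`` to spot wrapped numpy
# functions that lack a test.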
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_max(self):
self.check(np.max)
def test_min(self):
self.check(np.min)
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`round_` is deprecated as of NumPy 1.25.0")
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
    def test_isclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
def test_median_nan_scalar(self):
# See gh-12165; this dropped the unit in numpy < 1.22
data = [1.0, 2, np.nan, 3, 4] << u.km
result = np.median(data)
assert_array_equal(result, np.nan * u.km)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # With an axis argument, the result is a plain integer ndarray, not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace gets the unit of the end point, which is not super logical.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@classmethod
def _range_value(cls, range, unit):
if isinstance(range, u.Quantity):
return range.to_value(unit)
else:
return [cls._range_value(r, unit) for r in range]
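    # For illustration (assumed values, matching the parametrized ranges below):
    #     _range_value([2 * u.m, 500 * u.cm], u.m)  ->  [2.0, 5.0]
    #     _range_value([2, 5] * u.m, u.m)           ->  array([2., 5.])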
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_range(self, range):
self.check(
np.histogram,
self.x,
range=range,
value_args=[self.x.value],
value_kwargs=dict(range=self._range_value(range, self.x.unit)),
expected_units=(None, self.x.unit),
)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_bin_edges_range(self, range):
out_b = np.histogram_bin_edges(self.x, range=range)
expected_b = np.histogram_bin_edges(
self.x.value, range=self._range_value(range, self.x.unit)
)
assert np.all(out_b.value == expected_b)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogram2d_range(self, range):
self.check(
np.histogram2d,
self.x,
self.y,
range=range,
value_args=[self.x.value, self.y.value],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, self.x.unit, self.y.unit),
)
@needs_array_function
def test_histogramdd(self):
        # First, replicate the histogram2d tests, but using the
# histogramdd override. Normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogramdd_range(self, range):
self.check(
np.histogramdd,
(self.x, self.y),
range=range,
value_args=[(self.x.value, self.y.value)],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, (self.x.unit, self.y.unit)),
)
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
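        # The factor satisfies cd @ cd.T.conj() == q, so each entry of the
        # Cholesky factor carries unit**0.5.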
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
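        # Q is orthonormal and hence dimensionless; the unit goes entirely into R.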
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
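        # q_pv is a structured Quantity with fields "p" [km] and "v" [km/s];
        # q_pv_t nests that structure under "pv" and adds a time field "t" [s].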
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
        assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
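# The sets below collect numpy functions that the tests above deliberately do
# not exercise; ``test_testing_completeness`` checks that everything else in
# ``all_wrapped`` is covered.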
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
76ca4b6f9aca32f2aee76588f6b73bb618330a616e7248814ae544c39c9350a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Unit tests for the handling of physical types in `astropy.units`.
"""
import pickle
import pytest
from astropy import units as u
from astropy.constants import hbar
from astropy.units import physical
from astropy.utils.exceptions import AstropyDeprecationWarning
unit_physical_type_pairs = [
(u.m, "length"),
(u.cm**3, "volume"),
(u.km / u.h, "speed"),
(u.barn * u.Mpc, "volume"),
(u.m * u.s**8, "unknown"),
(u.m / u.m, "dimensionless"),
(hbar.unit, "angular momentum"),
(u.erg / (u.cm**2 * u.s * u.AA), "spectral flux density wav"),
(u.photon / (u.cm**2 * u.s * u.AA), "photon flux density wav"),
(u.photon / (u.cm**2 * u.s * u.Hz), "photon flux density"),
(u.byte, "data quantity"),
(u.bit, "data quantity"),
(u.imperial.mi / u.week, "speed"),
(u.erg / u.s, "power"),
(u.C / u.s, "electrical current"),
(u.C / u.s / u.cm**2, "electrical current density"),
(u.T * u.m**2, "magnetic flux"),
(u.N * u.m, "energy"),
(u.rad / u.ms, "angular speed"),
(u.Unit(1), "dimensionless"),
(u.m**2, "area"),
(u.s, "time"),
(u.rad, "angle"),
(u.sr, "solid angle"),
(u.m / u.s**2, "acceleration"),
(u.Hz, "frequency"),
(u.g, "mass"),
(u.mol, "amount of substance"),
(u.K, "temperature"),
(u.deg_C, "temperature"),
(u.imperial.deg_F, "temperature"),
(u.imperial.deg_R, "temperature"),
(u.imperial.deg_R / u.m, "temperature_gradient"),
(u.N, "force"),
(u.J, "energy"),
(u.Pa, "pressure"),
(u.W, "power"),
(u.kg / u.m**3, "mass density"),
(u.m**3 / u.kg, "specific volume"),
(u.mol / u.m**3, "molar concentration"),
(u.kg * u.m / u.s, "momentum/impulse"),
(u.kg * u.m**2 / u.s, "angular momentum"),
(u.rad / u.s, "angular speed"),
(u.rad / u.s**2, "angular acceleration"),
(u.g / (u.m * u.s), "dynamic viscosity"),
(u.m**2 / u.s, "kinematic viscosity"),
(u.m**-1, "wavenumber"),
(u.A, "electrical current"),
(u.C, "electrical charge"),
(u.V, "electrical potential"),
(u.Ohm, "electrical resistance"),
(u.S, "electrical conductance"),
(u.F, "electrical capacitance"),
(u.C * u.m, "electrical dipole moment"),
(u.A / u.m**2, "electrical current density"),
(u.V / u.m, "electrical field strength"),
(u.C / u.m**2, "electrical flux density"),
(u.C / u.m**3, "electrical charge density"),
(u.F / u.m, "permittivity"),
(u.Wb, "magnetic flux"),
(u.T, "magnetic flux density"),
(u.A / u.m, "magnetic field strength"),
(u.H / u.m, "electromagnetic field strength"),
(u.H, "inductance"),
(u.cd, "luminous intensity"),
(u.lm, "luminous flux"),
(u.lx, "luminous emittance/illuminance"),
(u.W / u.sr, "radiant intensity"),
(u.cd / u.m**2, "luminance"),
(u.astrophys.Jy, "spectral flux density"),
(u.astrophys.R, "photon flux"),
(u.misc.bit, "data quantity"),
(u.misc.bit / u.s, "bandwidth"),
(u.cgs.Franklin, "electrical charge (ESU)"),
(u.cgs.statampere, "electrical current (ESU)"),
(u.cgs.Biot, "electrical current (EMU)"),
(u.cgs.abcoulomb, "electrical charge (EMU)"),
(u.imperial.btu / (u.s * u.m * u.imperial.deg_F), "thermal conductivity"),
(u.imperial.cal / u.deg_C, "heat capacity"),
(u.imperial.cal / u.deg_C / u.g, "specific heat capacity"),
(u.J * u.m**-2 * u.s**-1, "energy flux"),
(u.W / u.m**2, "energy flux"),
(u.m**3 / u.mol, "molar volume"),
(u.m / u.S, "electrical resistivity"),
(u.S / u.m, "electrical conductivity"),
(u.A * u.m**2, "magnetic moment"),
(u.J / u.T, "magnetic moment"),
(u.yr**-1 * u.Mpc**-3, "volumetric rate"),
(u.m / u.s**3, "jerk"),
(u.m / u.s**4, "snap"),
(u.m / u.s**5, "crackle"),
(u.m / u.s**6, "pop"),
(u.deg_C / u.m, "temperature gradient"),
(u.imperial.deg_F / u.m, "temperature gradient"),
(u.imperial.deg_R / u.imperial.ft, "temperature gradient"),
(u.imperial.Calorie / u.g, "specific energy"),
(u.mol / u.L / u.s, "reaction rate"),
(u.imperial.lbf * u.imperial.ft * u.s**2, "moment of inertia"),
(u.mol / u.s, "catalytic activity"),
(u.imperial.kcal / u.deg_C / u.mol, "molar heat capacity"),
(u.mol / u.kg, "molality"),
(u.imperial.inch * u.hr, "absement"),
(u.imperial.ft**3 / u.s, "volumetric flow rate"),
(u.Hz / u.s, "frequency drift"),
(u.Pa**-1, "compressibility"),
(u.dimensionless_unscaled, "dimensionless"),
]
@pytest.mark.parametrize("unit, physical_type", unit_physical_type_pairs)
def test_physical_type_names(unit, physical_type):
"""
Test that the `physical_type` attribute of `u.Unit` objects provides
the expected physical type for various units.
Many of these tests are used to test backwards compatibility.
"""
assert unit.physical_type == physical_type, (
f"{unit!r}.physical_type was expected to return "
f"{physical_type!r}, but instead returned {unit.physical_type!r}."
)
length = u.m.physical_type
time = u.s.physical_type
speed = (u.m / u.s).physical_type
area = (u.m**2).physical_type
wavenumber = (u.m**-1).physical_type
dimensionless = u.dimensionless_unscaled.physical_type
pressure = u.Pa.physical_type
momentum = (u.kg * u.m / u.s).physical_type
@pytest.mark.parametrize(
"physical_type_representation, physical_type_name",
[
(1.0, "dimensionless"),
(u.m, "length"),
("work", "work"),
(5 * u.m, "length"),
(length, length),
(u.Pa, "energy_density"), # attribute-accessible name
("energy_density", "energy_density"), # attribute-accessible name
],
)
def test_getting_physical_type(physical_type_representation, physical_type_name):
"""Test different ways of getting a physical type."""
physical_type = physical.get_physical_type(physical_type_representation)
assert isinstance(physical_type, physical.PhysicalType)
assert physical_type == physical_type_name
@pytest.mark.parametrize(
"argument, exception",
[
("unknown", ValueError),
("not a name of a physical type", ValueError),
({"this set cannot be made into a Quantity"}, TypeError),
],
)
def test_getting_physical_type_exceptions(argument, exception):
"""
Test that `get_physical_type` raises appropriate exceptions when
provided with invalid arguments.
"""
with pytest.raises(exception):
physical.get_physical_type(argument)
def test_physical_type_cannot_become_quantity():
"""
Test that `PhysicalType` instances cannot be cast into `Quantity`
objects. A failure in this test could be related to failures
in subsequent tests.
"""
with pytest.raises(TypeError):
u.Quantity(u.m.physical_type, u.m)
# left term, right term, operator, expected value
operation_parameters = [
(length, length, "__eq__", True),
(length, area, "__eq__", False),
(length, "length", "__eq__", True),
("length", length, "__eq__", NotImplemented),
(dimensionless, dimensionless, "__eq__", True),
(momentum, "momentum/impulse", "__eq__", True), # test delimiters in names
(pressure, "energy_density", "__eq__", True), # test underscores in names
((u.m**8).physical_type, "unknown", "__eq__", True),
((u.m**8).physical_type, (u.m**9).physical_type, "__eq__", False),
(length, length, "__ne__", False),
(speed, time, "__ne__", True),
(pressure, dimensionless, "__ne__", True),
(length, u.m, "__eq__", NotImplemented),
(length, length, "__mul__", area),
(speed, time, "__mul__", length),
(speed, time, "__rmul__", length),
(length, time, "__truediv__", speed),
(area, length, "__truediv__", length),
(length, area, "__rtruediv__", length),
(dimensionless, dimensionless, "__mul__", dimensionless),
(dimensionless, dimensionless, "__truediv__", dimensionless),
(length, 2, "__pow__", area),
(area, 0.5, "__pow__", length),
(dimensionless, 4, "__pow__", dimensionless),
(u.m, length, "__mul__", NotImplemented),
(3.2, length, "__mul__", NotImplemented),
(u.m, time, "__truediv__", NotImplemented),
(3.2, length, "__truediv__", NotImplemented),
(length, u.m, "__mul__", area),
(length, u.m, "__rmul__", area),
(speed, u.s, "__mul__", length),
(length, 1, "__mul__", length),
(length, 1, "__rmul__", length),
(length, u.s, "__truediv__", speed),
(area, 1, "__truediv__", area),
(time, u.m, "__rtruediv__", speed),
(length, 1.0, "__rtruediv__", wavenumber),
(length, 2, "__pow__", area),
(length, 32, "__mul__", NotImplemented),
(length, 0, "__rmul__", NotImplemented),
(length, 3.2, "__truediv__", NotImplemented),
(length, -1, "__rtruediv__", NotImplemented),
(length, "length", "__mul__", area),
(length, "length", "__rmul__", area),
(area, "length", "__truediv__", length),
(length, "area", "__rtruediv__", length),
]
@pytest.mark.parametrize("left, right, operator, expected", operation_parameters)
def test_physical_type_operations(left, right, operator, expected):
"""
Test that `PhysicalType` dunder methods that require another
argument behave as intended.
"""
assert getattr(left, operator)(right) == expected
unit_with_physical_type_set = [
(u.m, {"length"}),
(u.kg * u.m / u.s, {"impulse", "momentum"}),
(u.Pa, {"energy density", "pressure", "stress"}),
]
@pytest.mark.parametrize("unit, expected_set", unit_with_physical_type_set)
def test_physical_type_as_set(unit, expected_set):
"""Test making a `physical.PhysicalType` instance into a `set`."""
resulting_set = set(unit.physical_type)
assert resulting_set == expected_set
def test_physical_type_iteration():
"""Test iterating through different physical type names."""
physical_type_names = list(pressure)
assert physical_type_names == ["energy density", "pressure", "stress"]
def test_physical_type_in():
"""
Test that `in` works as expected for `PhysicalType` objects with one
or multiple names.
"""
assert "length" in length
assert "pressure" in pressure
equivalent_unit_pairs = [
(u.m, u.m),
(u.m, u.cm),
(u.N, u.kg * u.m * u.s**-2),
(u.barn * u.Mpc, u.cm**3),
(u.K, u.deg_C),
(u.K, u.imperial.deg_R),
(u.K, u.imperial.deg_F),
(u.deg_C, u.imperial.deg_F),
(u.m**18, u.pc**18),
]
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_physical_type_instance_equality(unit1, unit2):
"""
Test that `physical.PhysicalType` instances for units of the same
dimensionality are equal.
"""
assert (unit1.physical_type == unit2.physical_type) is True
assert (unit1.physical_type != unit2.physical_type) is False
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_get_physical_type_equivalent_pairs(unit1, unit2):
"""
Test that `get_physical_type` retrieves the same `PhysicalType`
instances for equivalent physical types, except for unknown types
which are not cataloged.
"""
physical_type1 = physical.get_physical_type(unit1)
physical_type2 = physical.get_physical_type(unit2)
assert physical_type1 == physical_type2
if physical_type1 != "unknown":
assert physical_type1 is physical_type2
nonequivalent_unit_pairs = [
(u.m, u.s),
(u.m**18, u.m**19),
(u.N, u.J),
(u.barn, u.imperial.deg_F),
]
@pytest.mark.parametrize("unit1, unit2", nonequivalent_unit_pairs)
def test_physical_type_instance_inequality(unit1, unit2):
"""
Test that `physical.PhysicalType` instances for units with different
dimensionality are considered unequal.
"""
physical_type1 = physical.PhysicalType(unit1, "ptype1")
physical_type2 = physical.PhysicalType(unit2, "ptype2")
assert (physical_type1 != physical_type2) is True
assert (physical_type1 == physical_type2) is False
physical_type_with_expected_str = [
(length, "length"),
(speed, "speed/velocity"),
(pressure, "energy density/pressure/stress"),
(u.deg_C.physical_type, "temperature"),
((u.J / u.K / u.kg).physical_type, "specific entropy/specific heat capacity"),
]
physical_type_with_expected_repr = [
(length, "PhysicalType('length')"),
(speed, "PhysicalType({'speed', 'velocity'})"),
(pressure, "PhysicalType({'energy density', 'pressure', 'stress'})"),
(u.deg_C.physical_type, "PhysicalType('temperature')"),
(
(u.J / u.K / u.kg).physical_type,
"PhysicalType({'specific entropy', 'specific heat capacity'})",
),
]
@pytest.mark.parametrize("physical_type, expected_str", physical_type_with_expected_str)
def test_physical_type_str(physical_type, expected_str):
"""Test using `str` on a `PhysicalType` instance."""
assert str(physical_type) == expected_str
@pytest.mark.parametrize(
"physical_type, expected_repr", physical_type_with_expected_repr
)
def test_physical_type_repr(physical_type, expected_repr):
"""Test using `repr` on a `PhysicalType` instance."""
assert repr(physical_type) == expected_repr
def test_physical_type_hash():
"""Test that a `PhysicalType` instance can be used as a dict key."""
dictionary = {length: 42}
assert dictionary[length] == 42
@pytest.mark.parametrize("multiplicand", [list(), 42, 0, -1])
def test_physical_type_multiplication(multiplicand):
"""
Test that multiplication of a physical type returns `NotImplemented`
when attempted for an invalid type.
"""
with pytest.raises(TypeError):
length * multiplicand
def test_unrecognized_unit_physical_type():
"""
Test basic functionality for the physical type of an unrecognized
unit.
"""
unrecognized_unit = u.Unit("parrot", parse_strict="silent")
physical_type = unrecognized_unit.physical_type
assert isinstance(physical_type, physical.PhysicalType)
assert physical_type == "unknown"
invalid_inputs = [(42,), ("valid input", 42)]
@pytest.mark.parametrize("invalid_input", invalid_inputs)
def test_invalid_physical_types(invalid_input):
"""
Test that `PhysicalType` cannot be instantiated when one of the
supplied names is not a string, while making sure that the physical
type for the unit remains unknown.
"""
obscure_unit = u.s**87
with pytest.raises(ValueError):
physical.PhysicalType(obscure_unit, invalid_input)
assert obscure_unit.physical_type == "unknown"
class TestDefPhysType:
weird_unit = u.m**99
strange_unit = u.s**42
def test_attempt_to_define_unknown_physical_type(self):
"""Test that a unit cannot be defined as unknown."""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, "unknown")
assert "unknown" not in physical._unit_physical_mapping
def test_multiple_same_physical_type_names(self):
"""
Test that `def_physical_type` raises an exception when it tries to
set the physical type of a new unit as the name of an existing
physical type.
"""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, {"time", "something"})
assert self.weird_unit.physical_type == "unknown"
def test_expanding_names_for_physical_type(self):
"""
Test that calling `def_physical_type` on an existing physical
type adds a new physical type name.
"""
weird_name = "weird name"
strange_name = "strange name"
try:
physical.def_physical_type(self.weird_unit, weird_name)
assert (
self.weird_unit.physical_type == weird_name
), f"unable to set physical type for {self.weird_unit}"
except Exception:
raise
finally: # cleanup added name
physical._attrname_physical_mapping.pop(weird_name.replace(" ", "_"), None)
physical._name_physical_mapping.pop(weird_name, None)
# add both strange_name and weird_name
try:
physical.def_physical_type(self.weird_unit, strange_name)
assert set((self.weird_unit).physical_type) == {
weird_name,
strange_name,
}, "did not correctly append a new physical type name."
except Exception:
raise
finally: # cleanup added names
physical._attrname_physical_mapping.pop(
strange_name.replace(" ", "_"), None
)
physical._name_physical_mapping.pop(strange_name, None)
physical._attrname_physical_mapping.pop(weird_name.replace(" ", "_"), None)
physical._name_physical_mapping.pop(weird_name, None)
def test_redundant_physical_type(self):
"""
Test that a physical type name already in use cannot be assigned
for another unit (excluding `"unknown"`).
"""
with pytest.raises(ValueError):
physical.def_physical_type(self.weird_unit, "length")
@staticmethod
def _undef_physical_type(unit):
"""Reset the physical type of unit to "unknown"."""
for name in list(unit.physical_type):
del physical._unit_physical_mapping[name]
del physical._physical_unit_mapping[unit._get_physical_type_id()]
assert unit.physical_type == "unknown"
def teardown_method(self):
"""
Remove the definitions of the physical types that were added
        using `def_physical_type` for testing purposes.
"""
for unit in [self.weird_unit, self.strange_unit]:
physical_type = physical.get_physical_type(unit)
if physical_type != "unknown":
self._undef_physical_type(unit)
assert unit.physical_type == "unknown", (
f"the physical type for {unit}, which was added for"
"testing, was not deleted."
)
@pytest.mark.parametrize(
"method, expected",
[("title", "Length"), ("isalpha", True), ("isnumeric", False), ("upper", "LENGTH")],
)
def test_that_str_methods_work_with_physical_types(method, expected):
"""
Test that str methods work for `PhysicalType` instances while issuing
a deprecation warning.
"""
with pytest.warns(AstropyDeprecationWarning, match="PhysicalType instances"):
result_of_method_call = getattr(length, method)()
assert result_of_method_call == expected
def test_missing_physical_type_attribute():
"""
Test that a missing attribute raises an `AttributeError`.
This test should be removed when the deprecated option of calling
string methods on PhysicalType instances is removed from
`PhysicalType.__getattr__`.
"""
with pytest.raises(AttributeError):
length.not_the_name_of_a_str_or_physical_type_attribute
@pytest.mark.parametrize("ptype_name", ["length", "speed", "entropy"])
def test_pickling(ptype_name):
# Regression test for #11685
ptype = u.get_physical_type(ptype_name)
pkl = pickle.dumps(ptype)
other = pickle.loads(pkl)
assert other == ptype
def test_physical_types_module_access():
# all physical type names in dir
assert set(dir(physical)).issuperset(physical._attrname_physical_mapping.keys())
assert set(dir(physical)).issuperset(physical.__all__)
    # all physical types can be accessed by name
for pname in physical._attrname_physical_mapping.keys():
ptype = physical._attrname_physical_mapping[pname]
        assert hasattr(physical, pname)  # make sure this works with lazy loading
assert getattr(physical, pname) is ptype
# a failed access
with pytest.raises(AttributeError, match="has no attribute"):
physical.not_a_valid_physical_type_name
|
fd88d5e80ec00ccb9fb67589d4acf984742b62f54efddc6ff14007ddb51b6424 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import (
Attribute,
CoordinateAttribute,
DifferentialAttribute,
EarthLocationAttribute,
QuantityAttribute,
TimeAttribute,
)
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.builtin_frames import (
FK4,
FK5,
GCRS,
HCRS,
ICRS,
ITRS,
AltAz,
Galactic,
Galactocentric,
HADec,
)
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
CartesianDifferential,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from .test_representation import unitphysics # this fixture is used below # noqa: F401
def setup_function(func):
"""Copy original 'REPRESENTATIONCLASSES' as attribute in function."""
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
"""Reset REPRESENTATION_CLASSES to original value."""
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
"""Unit tests of the Attribute descriptor."""
class TestAttributes:
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute="attr_2")
attr_none_attr2 = Attribute(default=None, secondary_attribute="attr_2")
attr_none_nonexist = Attribute(default=None, secondary_attribute="nonexist")
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
# Setting values via '_'-prefixed internal vars
    # (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert "Cannot set frame attribute" in str(err.value)
def test_frame_subclass_attribute_descriptor():
"""Unit test of the attribute descriptors in subclasses."""
_EQUINOX_B1980 = Time("B1980", scale="tai")
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default="newattr")
mfk4 = MyFK4()
assert mfk4.equinox.value == "B1950.000"
assert mfk4.obstime.value == "B1980.000"
assert mfk4.newattr == "newattr"
with pytest.warns(AstropyDeprecationWarning):
assert set(mfk4.get_frame_attr_names()) == {"equinox", "obstime", "newattr"}
mfk4 = MyFK4(equinox="J1980.0", obstime="J1990.0", newattr="world")
assert mfk4.equinox.value == "J1980.000"
assert mfk4.obstime.value == "J1990.000"
assert mfk4.newattr == "world"
def test_frame_multiple_inheritance_attribute_descriptor():
"""
Ensure that all attributes are accumulated in case of inheritance from
multiple BaseCoordinateFrames. See
https://github.com/astropy/astropy/pull/11099#issuecomment-735829157
"""
class Frame1(BaseCoordinateFrame):
attr1 = Attribute()
class Frame2(BaseCoordinateFrame):
attr2 = Attribute()
class Frame3(Frame1, Frame2):
pass
assert len(Frame3.frame_attributes) == 2
assert "attr1" in Frame3.frame_attributes
assert "attr2" in Frame3.frame_attributes
# In case the same attribute exists in both frames, the one from the
# left-most class in the MRO should take precedence
class Frame4(BaseCoordinateFrame):
attr1 = Attribute()
attr2 = Attribute()
class Frame5(Frame1, Frame4):
pass
assert Frame5.frame_attributes["attr1"] is Frame1.frame_attributes["attr1"]
assert Frame5.frame_attributes["attr2"] is Frame4.frame_attributes["attr2"]
def test_differentialattribute():
# Test logic of passing input through to allowed class
vel = [1, 2, 3] * u.km / u.s
dif = r.CartesianDifferential(vel)
class TestFrame(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif, allowed_classes=[r.CartesianDifferential]
)
frame1 = TestFrame()
frame2 = TestFrame(attrtest=dif)
frame3 = TestFrame(attrtest=vel)
assert np.all(frame1.attrtest.d_xyz == frame2.attrtest.d_xyz)
assert np.all(frame1.attrtest.d_xyz == frame3.attrtest.d_xyz)
# This shouldn't work if there is more than one allowed class:
class TestFrame2(BaseCoordinateFrame):
attrtest = DifferentialAttribute(
default=dif,
allowed_classes=[r.CartesianDifferential, r.CylindricalDifferential],
)
frame1 = TestFrame2()
frame2 = TestFrame2(attrtest=dif)
with pytest.raises(TypeError):
TestFrame2(attrtest=vel)
def test_create_data_frames():
# from repr
i1 = ICRS(r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1 * u.deg, lat=2 * u.deg))
# from preferred name
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
i4 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.0] * u.deg
def test_create_ordered_data():
TOL = 1e-10 * u.deg
i = ICRS(1 * u.deg, 2 * u.deg)
assert (i.ra - 1 * u.deg) < TOL
assert (i.dec - 2 * u.deg) < TOL
g = Galactic(1 * u.deg, 2 * u.deg)
assert (g.l - 1 * u.deg) < TOL
assert (g.b - 2 * u.deg) < TOL
a = AltAz(1 * u.deg, 2 * u.deg)
assert (a.az - 1 * u.deg) < TOL
assert (a.alt - 2 * u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1 * u.deg, 2 * u.deg, 1 * u.deg, 2 * u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
ICRS(sph, 1 * u.deg, 2 * u.deg)
def test_create_nodata_frames():
i = ICRS()
assert len(i.frame_attributes) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_defaults()["equinox"]
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_defaults()["equinox"]
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (
FK4.get_frame_attr_defaults()["obstime"],
FK4.get_frame_attr_defaults()["equinox"],
)
def test_no_data_nonscalar_frames():
a1 = AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3, 1)) * u.deg_C,
)
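    # obstime has shape (10,) and temperature shape (3, 1); together they
    # broadcast to (3, 10).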
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(
obstime=Time("2012-01-01") + np.arange(10.0) * u.day,
temperature=np.ones((3,)) * u.deg_C,
)
assert "inconsistent shapes" in str(exc.value)
def test_frame_repr():
i = ICRS()
assert repr(i) == "<ICRS Frame>"
f5 = FK5()
assert repr(f5).startswith("<FK5 Frame (equinox=")
i2 = ICRS(ra=1 * u.deg, dec=2 * u.deg)
i3 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.kpc)
assert repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n (1., 2.)>"
assert (
repr(i3)
== "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n (1., 2., 3.)>"
)
# try with arrays
i2 = ICRS(ra=[1.1, 2.1] * u.deg, dec=[2.1, 3.1] * u.deg)
i3 = ICRS(
ra=[1.1, 2.1] * u.deg, dec=[-15.6, 17.1] * u.deg, distance=[11.0, 21.0] * u.kpc
)
assert (
repr(i2) == "<ICRS Coordinate: (ra, dec) in deg\n [(1.1, 2.1), (2.1, 3.1)]>"
)
assert (
repr(i3) == "<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n"
" [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>"
)
def test_frame_repr_vels():
i = ICRS(
ra=1 * u.deg,
dec=2 * u.deg,
pm_ra_cosdec=1 * u.marcsec / u.yr,
pm_dec=2 * u.marcsec / u.yr,
)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert (
repr(i) == "<ICRS Coordinate: (ra, dec) in deg\n"
" (1., 2.)\n"
" (pm_ra_cosdec, pm_dec) in mas / yr\n"
" (1., 2.)>"
)
def test_converting_units():
    # this is a regular expression that, used with split (see below), removes
    # everything after the decimal point, to avoid rounding problems
rexrepr = re.compile(r"(.*?=\d\.).*?( .*?=\d\.).*?( .*)")
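    # The captured groups keep the text up to and including each "=<digit>."
    # plus the trailing chunk, so "".join over re.split drops the fractional
    # digits in between.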
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.0 * u.deg, dec=2.0 * u.deg)
i2_many = ICRS(ra=[2.0, 4.0] * u.deg, dec=[2.0, -8.1] * u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5()).transform_to(ICRS())
i4_many = i2_many.transform_to(FK5()).transform_to(ICRS())
ri2 = "".join(rexrepr.split(repr(i2)))
ri4 = "".join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = "".join(rexrepr.split(repr(i2_many)))
ri4_many = "".join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra", u.hourangle),
RepresentationMapping("lat", "dec", None),
RepresentationMapping("distance", "distance"),
] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = "".join(rexrepr.split(repr(i2)))
rfi = "".join(rexrepr.split(repr(fi)))
rfi = re.sub("FakeICRS", "ICRS", rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "rara", u.hourangle),
RepresentationMapping("lat", "decdec", u.degree),
RepresentationMapping("distance", "distance", u.kpc),
]
}
i1 = NewICRS1(
rara=10 * u.degree,
decdec=-12 * u.deg,
distance=1000 * u.pc,
pm_rara_cosdecdec=100 * u.mas / u.yr,
pm_decdec=17 * u.mas / u.yr,
radial_velocity=10 * u.km / u.s,
)
assert allclose(i1.rara, 10 * u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.distance, 1000 * u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(
r.UnitSphericalRepresentation, s=r.UnitSphericalCosLatDifferential
)
assert allclose(i1.rara, 10 * u.deg)
assert allclose(i1.decdec, -12 * u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100 * u.mas / u.yr)
assert allclose(i1.pm_decdec, 17 * u.mas / u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ang1", u.hourangle),
RepresentationMapping("lat", "ang2", u.degree),
RepresentationMapping("distance", "howfar", u.kpc),
]
}
i2 = NewICRS2(ang1=10 * u.degree, ang2=-12 * u.deg, howfar=1000 * u.pc)
assert allclose(i2.ang1, 10 * u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12 * u.deg)
assert allclose(i2.howfar, 1000 * u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping("d_lon_coslat", "pm_ang1", u.hourangle / u.year),
RepresentationMapping("d_lat", "pm_ang2"),
RepresentationMapping("d_distance", "vlos", u.kpc / u.Myr),
]
}
i3 = NewICRS3(
lon=10 * u.degree,
lat=-12 * u.deg,
distance=1000 * u.pc,
pm_ang1=1 * u.mas / u.yr,
pm_ang2=2 * u.mas / u.yr,
vlos=100 * u.km / u.s,
)
assert allclose(i3.pm_ang1, 1 * u.mas / u.yr)
assert i3.pm_ang1.unit == u.hourangle / u.year
assert allclose(i3.pm_ang2, 2 * u.mas / u.yr)
assert allclose(i3.vlos, 100 * u.km / u.s)
assert i3.vlos.unit == u.kpc / u.Myr
def test_realizing():
rep = r.SphericalRepresentation(1 * u.deg, 2 * u.deg, 3 * u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time("J2001"))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_defaults()["equinox"]
# Check that a nicer error message is returned:
with pytest.raises(
TypeError, match="Class passed as data instead of a representation"
):
f.realize_frame(f.representation_type)
def test_replicating():
i = ICRS(ra=[1] * u.deg, dec=[2] * u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0 * u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1 * u.deg, az=2 * u.deg, obstime=Time("J2000"))
aaclone = aa.replicate_without_data(obstime=Time("J2001"))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
rep = r.SphericalRepresentation(
[1, 2, 3] * u.deg, [4, 5, 6] * u.deg, [7, 8, 9] * u.kpc
)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
    This test just makes sure the transform architecture works, but does *not*
    actually test that all the builtin transforms themselves are accurate.
"""
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
f = i.transform_to(FK5())
i2 = f.transform_to(ICRS())
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f4 = f.transform_to(FK4())
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4().equinox
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i.transform_to(ICRS())
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1 * u.deg, dec=2 * u.deg, equinox=Time("J2001"))
f2 = f.transform_to(FK5()) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg)
i2 = i1.transform_to(Galactic()).transform_to(ICRS())
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
times = Time("2016-08-23") + np.linspace(0, 10, 12) * u.day
coo1 = ICRS(
ra=[[0.0], [10.0], [20.0]] * u.deg, dec=[[-30.0], [30.0], [60.0]] * u.deg
)
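    # coo1 has shape (3, 1) and the equinox times shape (12,), so the
    # transformed frame broadcasts to (3, 12).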
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_setitem_no_velocity():
"""Test different flavors of item setting for a Frame without a velocity."""
obstime = "B1955"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = sc0.copy()
sc1_repr = repr(sc1)
assert "representation" in sc1.cache
sc1[1] = sc2[0]
assert sc1.cache == {}
assert repr(sc2) != sc1_repr
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
# Works for array-valued obstime so long as they are considered equivalent
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, obstime])
sc1[0] = sc2[0]
# Multidimensional coordinates
sc1 = FK4([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc2 = FK4([[10, 20], [30, 40]] * u.deg, [[50, 60], [70, 80]] * u.deg)
sc1[0] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [[10, 20], [3, 4]])
assert np.allclose(sc1.dec.to_value(u.deg), [[50, 60], [7, 8]])
def test_setitem_velocities():
"""Test different flavors of item setting for a Frame with a velocity."""
sc0 = FK4(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
)
sc2 = FK4(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == sc2.obstime
assert sc1.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
obstime = "B1950"
sc0 = FK4([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = FK4([10, 20] * u.deg, [30, 40] * u.deg, obstime=obstime)
sc1 = Galactic(sc0.ra, sc0.dec)
with pytest.raises(
TypeError, match="can only set from object of same class: Galactic vs. FK4"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra[0], sc0.dec[0], obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
sc1 = FK4(obstime=obstime)
with pytest.raises(ValueError, match="cannot set frame which has no data"):
sc1[0] = sc2[0]
sc1 = FK4(sc0.ra, sc0.dec, obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
# Wrong shape
sc1 = FK4([sc0.ra], [sc0.dec], obstime=[obstime, "B1980"])
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1[0] = sc2[0]
def test_sep():
i1 = ICRS(ra=0 * u.deg, dec=1 * u.deg)
i2 = ICRS(ra=0 * u.deg, dec=2 * u.deg)
sep = i1.separation(i2)
assert_allclose(sep.deg, 1.0)
i3 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[5, 6] * u.kpc)
i4 = ICRS(ra=[1, 2] * u.deg, dec=[3, 4] * u.deg, distance=[4, 5] * u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1]) * u.kpc)
# check that it works even with velocities
i5 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
i6 = ICRS(
ra=[1, 2] * u.deg,
dec=[3, 4] * u.deg,
distance=[7, 8] * u.kpc,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
radial_velocity=[5, 6] * u.km / u.s,
)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2]) * u.kpc)
# 3d separations of dimensionless distances should still work
i7 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=3 * u.one)
i8 = ICRS(ra=1 * u.deg, dec=2 * u.deg, distance=4 * u.one)
sep3d = i7.separation_3d(i8)
assert_allclose(sep3d, 1 * u.one)
# but should fail with non-dimensionless
with pytest.raises(ValueError):
i7.separation_3d(i3)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
c = FK4(1 * u.deg, 2 * u.deg, equinox="J2001.5", obstime="2000-01-01 12:00:00")
assert c.equinox == Time("J2001.5")
assert c.obstime == Time("2000-01-01 12:00:00")
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert "Invalid time input" in str(err.value)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime="hello")
assert "Invalid time input" in str(err.value)
# A vector time should work if the shapes match, but we don't automatically
# broadcast the basic data (just like time).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=["J2000", "J2001"])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=["J2000", "J2001"])
assert "shape" in str(err.value)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
c1 = FK5(ra=1 * u.deg, dec=1 * u.deg)
c2 = FK5(
ra=1 * u.deg, dec=1 * u.deg, equinox=FK5.get_frame_attr_defaults()["equinox"]
)
c3 = FK5(ra=1 * u.deg, dec=1 * u.deg, equinox=Time("J2001.5"))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default("equinox")
assert not c2.is_frame_attr_default("equinox")
assert not c3.is_frame_attr_default("equinox")
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3 * u.deg, 4 * u.deg))
assert c4.is_frame_attr_default("equinox")
assert not c5.is_frame_attr_default("equinox")
def test_altaz_attributes():
aa = AltAz(1 * u.deg, 2 * u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1 * u.deg, 2 * u.deg, obstime="J2000")
assert aa2.obstime == Time("J2000")
aa3 = AltAz(
1 * u.deg, 2 * u.deg, location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m)
)
assert isinstance(aa3.location, EarthLocation)
def test_hadec_attributes():
hd = HADec(1 * u.hourangle, 2 * u.deg)
assert hd.ha == 1.0 * u.hourangle
assert hd.dec == 2 * u.deg
assert hd.obstime is None
assert hd.location is None
hd2 = HADec(
23 * u.hourangle,
-2 * u.deg,
obstime="J2000",
location=EarthLocation(0 * u.deg, 0 * u.deg, 0 * u.m),
)
assert_allclose(hd2.ha, -1 * u.hourangle)
assert hd2.dec == -2 * u.deg
assert hd2.obstime == Time("J2000")
assert isinstance(hd2.location, EarthLocation)
sr = hd2.represent_as(r.SphericalRepresentation)
assert_allclose(sr.lon, -1 * u.hourangle)
def test_itrs_earth_location():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
sat = EarthLocation(
lat=-24.6609379 * u.deg, lon=160.34199789 * u.deg, height=420.17927591 * u.km
)
itrs_geo = sat.get_itrs()
eloc = itrs_geo.earth_location
assert_allclose(sat.lon, eloc.lon)
assert_allclose(sat.lat, eloc.lat)
assert_allclose(sat.height, eloc.height)
topo_itrs_repr = itrs_geo.cartesian - loc.get_itrs().cartesian
itrs_topo = ITRS(topo_itrs_repr, location=loc)
eloc = itrs_topo.earth_location
assert_allclose(sat.lon, eloc.lon)
assert_allclose(sat.lat, eloc.lat)
assert_allclose(sat.height, eloc.height)
obstime = Time("J2010") # Anything different from default
topo_itrs_repr2 = sat.get_itrs(obstime).cartesian - loc.get_itrs(obstime).cartesian
itrs_topo2 = ITRS(topo_itrs_repr2, location=loc, obstime=obstime)
eloc2 = itrs_topo2.earth_location
assert_allclose(sat.lon, eloc2.lon)
assert_allclose(sat.lat, eloc2.lat)
assert_allclose(sat.height, eloc2.height)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
# Create the frame object.
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
icrs_cyl = icrs.cylindrical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation_type = r.CartesianRepresentation
assert icrs.representation_type == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation_type = r.CylindricalRepresentation
assert icrs.representation_type == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation_type = "spherical"
assert icrs.representation_type is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ("x", "y", "z"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
# Testing setter input using text argument for cylindrical.
icrs.representation_type = "cylindrical"
assert icrs.representation_type is r.CylindricalRepresentation
assert icrs_cyl.rho == icrs.rho
assert icrs_cyl.phi == icrs.phi
assert icrs_cyl.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CylindricalRepresentation must not have spherical attributes.
for attr in ("ra", "dec", "distance"):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert "object has no attribute" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = "WRONG"
assert "but must be a BaseRepresentation class" in str(err.value)
with pytest.raises(ValueError) as err:
icrs.representation_type = ICRS
assert "but must be a BaseRepresentation class" in str(err.value)
def test_represent_as():
icrs = ICRS(ra=1 * u.deg, dec=1 * u.deg)
cart1 = icrs.represent_as("cartesian")
cart2 = icrs.represent_as(r.CartesianRepresentation)
assert cart1.x == cart2.x
assert cart1.y == cart2.y
assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(
ra=0 * u.deg,
dec=0 * u.deg,
distance=10 * u.kpc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=1 * u.km / u.s,
)
# single string
rep2 = icrs.represent_as("cylindrical")
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials["s"], r.CylindricalDifferential)
# single class with positional in_frame_units, verify that warning raised
with pytest.warns(AstropyWarning, match="argument position") as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as("odaigahara")
def test_shorthand_representations():
rep = r.CartesianRepresentation([1, 2, 3] * u.pc)
dif = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
cyl = icrs.cylindrical
assert isinstance(cyl, r.CylindricalRepresentation)
assert isinstance(cyl.differentials["s"], r.CylindricalDifferential)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials["s"], r.SphericalCosLatDifferential)
def test_equal():
obstime = "B1955"
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = FK4([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = FK4([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
assert (FK4() == ICRS()) is False
assert (FK4() == FK4(obstime="J1999")) is False
def test_equal_exceptions():
# Shape mismatch
sc1 = FK4([1, 2, 3] * u.deg, [3, 4, 5] * u.deg)
with pytest.raises(ValueError, match="cannot compare: shape mismatch"):
sc1 == sc1[:2] # noqa: B015
# Different representation_type
sc1 = FK4(1, 2, 3, representation_type="cartesian")
sc2 = FK4(1 * u.deg, 2 * u.deg, 2, representation_type="spherical")
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: CartesianRepresentation vs. SphericalRepresentation"
),
):
sc1 == sc2 # noqa: B015
# Different differential type
sc1 = FK4(1 * u.deg, 2 * u.deg, radial_velocity=1 * u.km / u.s)
sc2 = FK4(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=1 * u.mas / u.yr, pm_dec=1 * u.mas / u.yr
)
with pytest.raises(
TypeError,
match=(
"cannot compare: objects must have same "
"class: RadialDifferential vs. UnitSphericalCosLatDifferential"
),
):
sc1 == sc2 # noqa: B015
# Different frame attribute
sc1 = FK5(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J1999")
with pytest.raises(
TypeError,
match=r"cannot compare: objects must have equivalent "
r"frames: <FK5 Frame \(equinox=J2000.000\)> "
r"vs. <FK5 Frame \(equinox=J1999.000\)>",
):
sc1 == sc2 # noqa: B015
# Different frame
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
with pytest.raises(
TypeError,
match="cannot compare: objects must have equivalent "
r"frames: <FK4 Frame \(equinox=B1950.000, obstime=B1950.000\)> "
r"vs. <FK5 Frame \(equinox=J2000.000\)>",
):
sc1 == sc2 # noqa: B015
sc1 = FK4(1 * u.deg, 2 * u.deg)
sc2 = FK4()
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc1 == sc2 # noqa: B015
with pytest.raises(
ValueError, match="cannot compare: one frame has data and the other does not"
):
sc2 == sc1 # noqa: B015
def test_dynamic_attrs():
c = ICRS(1 * u.deg, 2 * u.deg)
assert "ra" in dir(c)
assert "dec" in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err.value)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err.value)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert "does not have associated data" in str(excinfo.value)
def test_len0_data():
i = ICRS([] * u.deg, [] * u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime="J2002", obsgeoloc=[1, 2, 3] * u.km, obsgeovel=[4, 5, 6] * u.km / u.s)
# make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3] * u.km / u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3] * u.km) # incorrect shape
def test_quantity_attribute_default():
# The default default (yes) is None:
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.deg)
frame = MyCoord()
assert frame.someval is None
frame = MyCoord(someval=15 * u.deg)
assert u.isclose(frame.someval, 15 * u.deg)
# This should work if we don't explicitly pass in a unit, but we pass in a
# default value with a unit
class MyCoord2(BaseCoordinateFrame):
someval = QuantityAttribute(15 * u.deg)
frame = MyCoord2()
assert u.isclose(frame.someval, 15 * u.deg)
# Since here no shape was given, we can set to any shape we like.
frame = MyCoord2(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert np.all(frame.someval == 1 * u.deg)
# We should also be able to insist on a given shape.
class MyCoord3(BaseCoordinateFrame):
someval = QuantityAttribute(unit=u.arcsec, shape=(3,))
frame = MyCoord3(someval=np.ones(3) * u.deg)
assert frame.someval.shape == (3,)
assert frame.someval.unit == u.arcsec
assert u.allclose(frame.someval.value, 3600.0)
# The wrong shape raises.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=1.0 * u.deg)
# As does the wrong unit.
with pytest.raises(u.UnitsError):
MyCoord3(someval=np.ones(3) * u.m)
# We are allowed a short-cut for zero.
frame0 = MyCoord3(someval=0)
assert frame0.someval.shape == (3,)
assert frame0.someval.unit == u.arcsec
assert np.all(frame0.someval.value == 0.0)
# But not if it has the wrong shape.
with pytest.raises(ValueError, match="shape"):
MyCoord3(someval=np.zeros(2))
# This should fail if we don't pass in a default or a unit
with pytest.raises(ValueError):
class MyCoord(BaseCoordinateFrame):
someval = QuantityAttribute()
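# Illustrative sketch (not part of the tests above): the pieces exercised in
# test_quantity_attribute_default can be combined in a single frame definition.
# ``_SketchFrame`` and ``obs_offset`` are hypothetical names used only here.
def _sketch_quantity_attribute_frame():
    class _SketchFrame(BaseCoordinateFrame):
        # the unit is taken from the default; the shape is enforced on assignment
        obs_offset = QuantityAttribute(default=[0, 0, 0] * u.km, shape=(3,))

    frame = _SketchFrame(obs_offset=[1, 2, 3] * u.km)
    return frame.obs_offset  # a (3,)-shaped Quantity in km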
def test_eloc_attributes():
el = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
it = ITRS(
r.SphericalRepresentation(lon=12.3 * u.deg, lat=45.6 * u.deg, distance=1 * u.km)
)
gc = GCRS(ra=12.3 * u.deg, dec=45.6 * u.deg, distance=6375 * u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
# these should match *exactly* because the EarthLocation is passed through unchanged
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000 * u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500 * u.km
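# Minimal sketch of the distinction the comments above rely on (illustrative
# only, not used by the tests): an EarthLocation is geodetic (lat/lon/height on
# the ellipsoid), while ``get_itrs()`` returns a geocentric position, so the
# spherical ITRS latitude generally differs from the geodetic latitude.
def _sketch_earthlocation_vs_itrs():
    loc = EarthLocation(lon=12.3 * u.deg, lat=45.6 * u.deg, height=1 * u.km)
    itrs = loc.get_itrs()  # geocentric Cartesian ITRS position
    return loc.lat, itrs.spherical.lat  # geodetic vs geocentric latitude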
def test_equivalent_frames():
i = ICRS()
i2 = ICRS(1 * u.deg, 2 * u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f0 = FK5() # this J2000 is TT
f1 = FK5(equinox="J2000")
f2 = FK5(1 * u.deg, 2 * u.deg, equinox="J2000")
f3 = FK5(equinox="J2010")
f4 = FK4(equinox="J2010")
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f0.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime="J2010")
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_equivalent_frame_coordinateattribute():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
# These frames should not be considered equivalent
f0 = FrameWithCoordinateAttribute()
f1 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2000")
)
f2 = FrameWithCoordinateAttribute(
coord_attr=HCRS(3 * u.deg, 4 * u.deg, obstime="J2000")
)
f3 = FrameWithCoordinateAttribute(
coord_attr=HCRS(1 * u.deg, 2 * u.deg, obstime="J2001")
)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
assert not f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f2.is_equivalent_frame(f3)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
assert f2.is_equivalent_frame(deepcopy(f2))
assert f3.is_equivalent_frame(deepcopy(f3))
def test_equivalent_frame_locationattribute():
class FrameWithLocationAttribute(BaseCoordinateFrame):
loc_attr = EarthLocationAttribute()
# These frames should not be considered equivalent
f0 = FrameWithLocationAttribute()
location = EarthLocation(lat=-34, lon=19, height=300)
f1 = FrameWithLocationAttribute(loc_attr=location)
assert not f0.is_equivalent_frame(f1)
assert not f1.is_equivalent_frame(f0)
# They each should still be equivalent to a deep copy of themselves
assert f0.is_equivalent_frame(deepcopy(f0))
assert f1.is_equivalent_frame(deepcopy(f1))
def test_representation_subclass():
# Regression test for #3354
# Normally when instantiating a frame without a distance the frame will try
# and use UnitSphericalRepresentation internally instead of
# SphericalRepresentation.
frame = FK5(
representation_type=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg
)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(
representation_type=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg
)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation_type == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert (
repr(frame)
== "<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n (32., 20.)>"
)
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(
NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation_type=NewSphericalRepresentation,
)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation_type = "cartesian"
assert c[0].representation_type is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert "does not have associated data" in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
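# The cache should now hold the stored data representation plus the
# in-frame-units copy that repr() created, hence two entries.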
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_inplace_array():
i = ICRS([[1, 2], [3, 4]] * u.deg, [[10, 20], [30, 40]] * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200] * u.deg
# Clear the cache
i.cache.clear()
# This will build a fresh representation now that the cache has been cleared
assert_allclose(i.ra, [[100, 2], [200, 4]] * u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]] * u.deg)
def test_inplace_change():
i = ICRS(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache["representation"]) == 2
# Modify the data
i.data.lon[()] = 10 * u.deg
# Clear the cache
i.cache.clear()
# This will build a fresh representation now that the cache has been cleared
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
dif1 = r.CartesianDifferential([1, 2, 3] * u.km / u.s)
dif2 = r.CartesianDifferential([1, 2, 3] * u.km / u.s**2)
rep = r.CartesianRepresentation(
[1, 2, 3] * u.pc, differentials={"s": dif1, "s2": dif2}
)
# check that an error is raised when a representation carries more than one differential
with pytest.raises(ValueError):
ICRS(rep)
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e.value)
with pytest.raises(TypeError) as e:
ICRS(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
def test_non_spherical_representation_unit_creation(unitphysics): # noqa: F811
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg, r=1 * u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1 * u.deg, theta=25 * u.deg)
assert isinstance(picu.data, unitphysics)
def test_attribute_repr():
class Spam:
def _astropy_repr_in_frame(self):
return "TEST REPR"
class TestFrame(BaseCoordinateFrame):
attrtest = Attribute(default=Spam())
assert "TEST REPR" in repr(TestFrame())
def test_component_names_repr():
# Frame class with new component names that includes a name swap
class NameChangeFrame(BaseCoordinateFrame):
default_representation = r.PhysicsSphericalRepresentation
frame_specific_representation_info = {
r.PhysicsSphericalRepresentation: [
RepresentationMapping("phi", "theta", u.deg),
RepresentationMapping("theta", "phi", u.arcsec),
RepresentationMapping("r", "JUSTONCE", u.AU),
]
}
frame = NameChangeFrame(0 * u.deg, 0 * u.arcsec, 0 * u.AU)
# Check for the new names in the Frame repr
assert "(theta, phi, JUSTONCE)" in repr(frame)
# Check that the letter "r" has not been replaced more than once in the Frame repr
assert repr(frame).count("JUSTONCE") == 1
def test_galactocentric_defaults():
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
with galactocentric_frame_defaults.set("latest"):
galcen_latest = Galactocentric()
# parameters that changed
assert not u.allclose(galcen_pre40.galcen_distance, galcen_40.galcen_distance)
assert not u.allclose(galcen_pre40.z_sun, galcen_40.z_sun)
for k in galcen_40.frame_attributes:
if isinstance(getattr(galcen_40, k), BaseCoordinateFrame):
continue # skip coordinate comparison...
elif isinstance(getattr(galcen_40, k), CartesianDifferential):
assert u.allclose(
getattr(galcen_40, k).d_xyz, getattr(galcen_latest, k).d_xyz
)
else:
assert getattr(galcen_40, k) == getattr(galcen_latest, k)
# test validate Galactocentric
with galactocentric_frame_defaults.set("latest"):
params = galactocentric_frame_defaults.validate(galcen_latest)
references = galcen_latest.frame_attribute_references
state = dict(parameters=params, references=references)
assert galactocentric_frame_defaults.parameters == params
assert galactocentric_frame_defaults.references == references
assert galactocentric_frame_defaults._state == state
# Test not one of accepted parameter types
with pytest.raises(ValueError):
galactocentric_frame_defaults.validate(ValueError)
# test parameters property
assert (
galactocentric_frame_defaults.parameters
== galactocentric_frame_defaults.parameters
)
def test_galactocentric_references():
# references in the "scientific paper"-sense
with galactocentric_frame_defaults.set("pre-v4.0"):
galcen_pre40 = Galactocentric()
for k in galcen_pre40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_pre40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_40 = Galactocentric()
for k in galcen_40.frame_attributes:
if k == "roll": # no reference for this parameter
continue
assert k in galcen_40.frame_attribute_references
with galactocentric_frame_defaults.set("v4.0"):
galcen_custom = Galactocentric(z_sun=15 * u.pc)
for k in galcen_custom.frame_attributes:
if k == "roll": # no reference for this parameter
continue
if k == "z_sun":
assert k not in galcen_custom.frame_attribute_references
else:
assert k in galcen_custom.frame_attribute_references
def test_coordinateattribute_transformation():
class FrameWithCoordinateAttribute(BaseCoordinateFrame):
coord_attr = CoordinateAttribute(HCRS)
hcrs = HCRS(1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-02-03")
f1_frame = FrameWithCoordinateAttribute(coord_attr=hcrs)
f1_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(hcrs))
# The input is already HCRS, so the frame attribute should not change it
assert f1_frame.coord_attr == hcrs
# The output should not be different if a SkyCoord is provided
assert f1_skycoord.coord_attr == f1_frame.coord_attr
gcrs = GCRS(4 * u.deg, 5 * u.deg, 6 * u.AU, obstime="2004-05-06")
f2_frame = FrameWithCoordinateAttribute(coord_attr=gcrs)
f2_skycoord = FrameWithCoordinateAttribute(coord_attr=SkyCoord(gcrs))
# The input needs to be converted from GCRS to HCRS
assert isinstance(f2_frame.coord_attr, HCRS)
# The `obstime` frame attribute should have been "merged" in a SkyCoord-style transformation
assert f2_frame.coord_attr.obstime == gcrs.obstime
# The output should not be different if a SkyCoord is provided
assert f2_skycoord.coord_attr == f2_frame.coord_attr
def test_realize_frame_accepts_kwargs():
c1 = ICRS(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
representation_type=r.CartesianRepresentation,
)
new_data = r.CartesianRepresentation(x=11 * u.pc, y=12 * u.pc, z=13 * u.pc)
c2 = c1.realize_frame(new_data, representation_type="cartesian")
c3 = c1.realize_frame(new_data, representation_type="cylindrical")
assert c2.representation_type == r.CartesianRepresentation
assert c3.representation_type == r.CylindricalRepresentation
def test_nameless_frame_subclass():
"""Note: this is a regression test for #11096"""
class Test:
pass
# Subclass from a frame class and a non-frame class.
# This subclassing is the test!
class NewFrame(ICRS, Test):
pass
def test_frame_coord_comparison():
"""Test that frame can be compared to a SkyCoord"""
frame = ICRS(0 * u.deg, 0 * u.deg)
coord = SkyCoord(frame)
other = SkyCoord(ICRS(0 * u.deg, 1 * u.deg))
assert frame == coord
assert frame != other
assert not (frame == other)
error_msg = "objects must have equivalent frames"
with pytest.raises(TypeError, match=error_msg):
frame == SkyCoord(AltAz("0d", "1d")) # noqa: B015
coord = SkyCoord(ra=12 * u.hourangle, dec=5 * u.deg, frame=FK5(equinox="J1950"))
frame = FK5(ra=12 * u.hourangle, dec=5 * u.deg, equinox="J2000")
with pytest.raises(TypeError, match=error_msg):
coord == frame # noqa: B015
frame = ICRS()
coord = SkyCoord(0 * u.deg, 0 * u.deg, frame=frame)
error_msg = "Can only compare SkyCoord to Frame with data"
with pytest.raises(ValueError, match=error_msg):
frame == coord # noqa: B015
|
2b031c228fe48c99a7f6ffeede5e938e2423c274d8a6bff4bcd6afeb41327abb | """
This file tests the behavior of subclasses of Representation and Frames
"""
from copy import deepcopy
import astropy.coordinates
import astropy.units as u
from astropy.coordinates import ICRS, Latitude, Longitude
from astropy.coordinates.baseframe import RepresentationMapping, frame_transform_graph
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.representation.base import _invalidate_reprdiff_cls_hash
from astropy.coordinates.transformations import FunctionTransform
# Classes setup, borrowed from SunPy.
# Here we define the classes *inside* the tests to make sure that we can wipe
# the slate clean when the tests have finished running.
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
_invalidate_reprdiff_cls_hash()
def test_unit_representation_subclass():
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class UnitSphericalWrap180Representation(UnitSphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude}
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalWrap180Representation
class MyFrame(ICRS):
default_representation = SphericalWrap180Representation
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra"),
RepresentationMapping("lat", "dec"),
]
}
frame_specific_representation_info[
"unitsphericalwrap180"
] = frame_specific_representation_info[
"sphericalwrap180"
] = frame_specific_representation_info[
"spherical"
]
@frame_transform_graph.transform(
FunctionTransform, MyFrame, astropy.coordinates.ICRS
)
def myframe_to_icrs(myframe_coo, icrs):
return icrs.realize_frame(myframe_coo._data)
f = MyFrame(10 * u.deg, 10 * u.deg)
assert isinstance(f._data, UnitSphericalWrap180Representation)
assert isinstance(f.ra, Longitude180)
g = f.transform_to(astropy.coordinates.ICRS())
assert isinstance(g, astropy.coordinates.ICRS)
assert isinstance(g._data, UnitSphericalWrap180Representation)
frame_transform_graph.remove_transform(MyFrame, astropy.coordinates.ICRS, None)
|
0edb0e879e9622a2f5eab0b6b6b4b378dff79312a7837ea4122ebafb4bc98509 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test geodetic representations"""
import pytest
from astropy import units as u
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
BaseBodycentricRepresentation,
BaseGeodeticRepresentation,
CartesianRepresentation,
GRS80GeodeticRepresentation,
WGS72GeodeticRepresentation,
WGS84GeodeticRepresentation,
)
from astropy.coordinates.representation.geodetic import ELLIPSOIDS
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass
from astropy.coordinates.tests.test_representation import ( # noqa: F401
setup_function,
teardown_function,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.tests.test_quantity_erfa_ufuncs import vvd
class CustomGeodetic(BaseGeodeticRepresentation):
_flattening = 0.01832
_equatorial_radius = 4000000.0 * u.m
class CustomSphericGeodetic(BaseGeodeticRepresentation):
_flattening = 0.0
_equatorial_radius = 4000000.0 * u.m
class CustomSphericBodycentric(BaseBodycentricRepresentation):
_flattening = 0.0
_equatorial_radius = 4000000.0 * u.m
class IAUMARS2000GeodeticRepresentation(BaseGeodeticRepresentation):
_equatorial_radius = 3396190.0 * u.m
_flattening = 0.5886007555512007 * u.percent
class IAUMARS2000BodycentricRepresentation(BaseBodycentricRepresentation):
_equatorial_radius = 3396190.0 * u.m
_flattening = 0.5886007555512007 * u.percent
def test_geodetic_bodycentric_equivalence_spherical_bodies():
initial_cartesian = CartesianRepresentation(
x=[1, 3000.0] * u.km, y=[7000.0, 4.0] * u.km, z=[5.0, 6000.0] * u.km
)
gd_transformed = CustomSphericGeodetic.from_representation(initial_cartesian)
bc_transformed = CustomSphericBodycentric.from_representation(initial_cartesian)
assert_quantity_allclose(gd_transformed.lon, bc_transformed.lon)
assert_quantity_allclose(gd_transformed.lat, bc_transformed.lat)
assert_quantity_allclose(gd_transformed.height, bc_transformed.height)
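# For the zero-flattening bodies above, geodetic and bodycentric coordinates both
# reduce to plain spherical coordinates, which is why lon/lat/height agree.
# A rough sketch of that limiting case (illustrative only, not used by the tests):
def _sketch_spherical_body_to_cartesian(lon, lat, height, radius=4000000.0 * u.m):
    import numpy as np

    r = radius + height
    return (
        r * np.cos(lat) * np.cos(lon),
        r * np.cos(lat) * np.sin(lon),
        r * np.sin(lat),
    )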
@pytest.mark.parametrize(
"geodeticrepresentation",
[
CustomGeodetic,
WGS84GeodeticRepresentation,
IAUMARS2000GeodeticRepresentation,
IAUMARS2000BodycentricRepresentation,
],
)
def test_cartesian_geodetic_roundtrip(geodeticrepresentation):
# Test array-valued input in the process.
initial_cartesian = CartesianRepresentation(
x=[1, 3000.0] * u.km, y=[7000.0, 4.0] * u.km, z=[5.0, 6000.0] * u.km
)
transformed = geodeticrepresentation.from_representation(initial_cartesian)
roundtripped = CartesianRepresentation.from_representation(transformed)
assert_quantity_allclose(initial_cartesian.x, roundtripped.x)
assert_quantity_allclose(initial_cartesian.y, roundtripped.y)
assert_quantity_allclose(initial_cartesian.z, roundtripped.z)
@pytest.mark.parametrize(
"geodeticrepresentation",
[
CustomGeodetic,
WGS84GeodeticRepresentation,
IAUMARS2000GeodeticRepresentation,
IAUMARS2000BodycentricRepresentation,
],
)
def test_geodetic_cartesian_roundtrip(geodeticrepresentation):
initial_geodetic = geodeticrepresentation(
lon=[0.8, 1.3] * u.radian,
lat=[0.3, 0.98] * u.radian,
height=[100.0, 367.0] * u.m,
)
transformed = CartesianRepresentation.from_representation(initial_geodetic)
roundtripped = geodeticrepresentation.from_representation(transformed)
assert_quantity_allclose(initial_geodetic.lon, roundtripped.lon)
assert_quantity_allclose(initial_geodetic.lat, roundtripped.lat)
assert_quantity_allclose(initial_geodetic.height, roundtripped.height)
def test_geocentric_to_geodetic():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
# Here, test the chain. Direct conversion from Cartesian to
# various Geodetic representations is done indirectly in test_earth.
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
gc = CartesianRepresentation(x, y, z, u.m)
gd = WGS84GeodeticRepresentation.from_cartesian(gc)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.9827937232473290680, 1e-14, "eraGc2gd", "e1", status)
vvd(p, 0.97160184819075459, 1e-14, "eraGc2gd", "p1", status)
vvd(h, 331.4172461426059892, 1e-8, "eraGc2gd", "h1", status)
gd = gd.represent_as(GRS80GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
gd = gd.represent_as(WGS72GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_geodetic_to_geocentric():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
# These tests are also done implicitly in test_earth.py.
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
gd = WGS84GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
gd = GRS80GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
gd = WGS72GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, IAUMARS2000BodycentricRepresentation],
)
def test_default_height_is_zero(representation):
gd = representation(10 * u.deg, 20 * u.deg)
assert gd.lon == 10 * u.deg
assert gd.lat == 20 * u.deg
assert gd.height == 0 * u.m
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, IAUMARS2000BodycentricRepresentation],
)
def test_non_angle_error(representation):
with pytest.raises(u.UnitTypeError, match="require units equivalent to 'rad'"):
representation(20 * u.m, 20 * u.deg, 20 * u.m)
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, IAUMARS2000BodycentricRepresentation],
)
def test_non_length_error(representation):
with pytest.raises(u.UnitTypeError, match="units of length"):
representation(10 * u.deg, 20 * u.deg, 30)
def test_subclass_bad_ellipsoid():
# Test incomplete initialization.
msg = "module 'erfa' has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
class InvalidCustomEllipsoid(BaseGeodeticRepresentation):
_ellipsoid = "foo"
assert "foo" not in ELLIPSOIDS
assert "invalidcustomellipsoid" not in REPRESENTATION_CLASSES
@pytest.mark.parametrize(
"baserepresentation",
[BaseGeodeticRepresentation, BaseBodycentricRepresentation],
)
def test_geodetic_subclass_missing_equatorial_radius(baserepresentation):
msg = "'_equatorial_radius' and '_flattening'."
with pytest.raises(AttributeError, match=msg):
class MissingCustomAttribute(baserepresentation):
_flattening = 0.075 * u.dimensionless_unscaled
assert "missingcustomattribute" not in REPRESENTATION_CLASSES
|
074994d23b219e9e3b63e6afd3e41dd9906ad14b33dfb317b759ce1dd6bb552c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import builtin_frames as bf
from astropy.coordinates import galactocentric_frame_defaults
from astropy.coordinates import representation as r
from astropy.coordinates.builtin_frames import CIRS, ICRS, Galactic, Galactocentric
from astropy.coordinates.errors import ConvertError
from astropy.units import allclose as quantity_allclose
POSITION_ON_SKY = {"ra": 37.4 * u.deg, "dec": -55.8 * u.deg}
DISTANCE = {"distance": 150 * u.pc}
PROPER_MOTION = {"pm_ra_cosdec": -21.2 * u.mas / u.yr, "pm_dec": 17.1 * u.mas / u.yr}
RADIAL_VELOCITY = {"radial_velocity": 105.7 * u.km / u.s}
CARTESIAN_POSITION = {
"x": 1 * u.pc,
"y": 2 * u.pc,
"z": 3 * u.pc,
"representation_type": r.CartesianRepresentation,
}
CARTESIAN_REPRESENTATION_KEYWORD_STR = {"representation_type": "cartesian"}
CARTESIAN_VELOCITY = {
"v_x": 1 * u.km / u.s,
"v_y": 2 * u.km / u.s,
"v_z": 3 * u.km / u.s,
"differential_type": r.CartesianDifferential,
}
CARTESIAN_DIFFERENTIAL_KEYWORD_STR = {"differential_type": "cartesian"}
def test_api():
# transform observed Barycentric velocities to full-space Galactocentric
with galactocentric_frame_defaults.set("latest"):
icrs = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
icrs.transform_to(Galactocentric())
# transform a set of ICRS proper motions to Galactic
ICRS(**POSITION_ON_SKY, **PROPER_MOTION).transform_to(Galactic())
@pytest.mark.parametrize(
"kwargs",
[
POSITION_ON_SKY,
# In Python 3.9 we could write `POSITION_ON_SKY | DISTANCE`
{**POSITION_ON_SKY, **DISTANCE},
{**POSITION_ON_SKY, **PROPER_MOTION},
{**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION},
{**POSITION_ON_SKY, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **DISTANCE, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY},
{**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY},
# Now test other representation/differential types:
CARTESIAN_POSITION,
{**CARTESIAN_POSITION, **CARTESIAN_REPRESENTATION_KEYWORD_STR},
{**CARTESIAN_POSITION, **CARTESIAN_VELOCITY},
{
**CARTESIAN_POSITION,
**CARTESIAN_VELOCITY,
**CARTESIAN_DIFFERENTIAL_KEYWORD_STR,
},
],
)
def test_all_arg_options(kwargs):
# Here we simply verify that, when passing these kwargs in, we have
# access to the relevant attributes on the resulting object
icrs = ICRS(**kwargs)
gal = icrs.transform_to(Galactic())
repr_gal = repr(gal)
for k in kwargs:
if k == "differential_type":
continue
getattr(icrs, k)
if "pm_ra_cosdec" in kwargs: # should have both
assert "pm_l_cosb" in repr_gal
assert "pm_b" in repr_gal
assert "mas / yr" in repr_gal
if "radial_velocity" not in kwargs:
assert "radial_velocity" not in repr_gal
if "radial_velocity" in kwargs:
assert "radial_velocity" in repr_gal
assert "km / s" in repr_gal
if "pm_ra_cosdec" not in kwargs:
assert "pm_l_cosb" not in repr_gal
assert "pm_b" not in repr_gal
@pytest.mark.parametrize(
"cls,lon,lat",
[
[bf.ICRS, "ra", "dec"],
[bf.FK4, "ra", "dec"],
[bf.FK4NoETerms, "ra", "dec"],
[bf.FK5, "ra", "dec"],
[bf.GCRS, "ra", "dec"],
[bf.HCRS, "ra", "dec"],
[bf.LSR, "ra", "dec"],
[bf.CIRS, "ra", "dec"],
[bf.Galactic, "l", "b"],
[bf.AltAz, "az", "alt"],
[bf.Supergalactic, "sgl", "sgb"],
[bf.GalacticLSR, "l", "b"],
[bf.HeliocentricMeanEcliptic, "lon", "lat"],
[bf.GeocentricMeanEcliptic, "lon", "lat"],
[bf.BarycentricMeanEcliptic, "lon", "lat"],
[bf.PrecessedGeocentric, "ra", "dec"],
],
)
def test_expected_arg_names(cls, lon, lat):
kwargs = {
lon: 37.4 * u.deg,
lat: -55.8 * u.deg,
f"pm_{lon}_cos{lat}": -21.2 * u.mas / u.yr,
f"pm_{lat}": 17.1 * u.mas / u.yr,
}
frame = cls(**kwargs, **DISTANCE, **RADIAL_VELOCITY)
# these data are extracted from the vizier version of XHIP:
# https://vizier.cds.unistra.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
_xhip_head = """
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
R D pmRA pmDE Di pmGLon pmGLat RV U V W
HIP AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg) GLat (deg) st (pc) (mas/yr) (mas/yr) (km/s) (km/s) (km/s) (km/s)
------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
""".strip()
_xhip_data = """
19 000.05331690 +38.30408633 -3.17 -15.37 112.00026470 -23.47789171 247.12 -6.40 -14.33 6.30 7.3 2.0 -17.9
20 000.06295067 +23.52928427 36.11 -22.48 108.02779304 -37.85659811 95.90 29.35 -30.78 37.80 -19.3 16.1 -34.2
21 000.06623581 +08.00723430 61.48 -0.23 101.69697120 -52.74179515 183.68 58.06 -20.23 -11.72 -45.2 -30.9 -1.3
24917 080.09698238 -33.39874984 -4.30 13.40 236.92324669 -32.58047131 107.38 -14.03 -1.15 36.10 -22.4 -21.3 -19.9
59207 182.13915108 +65.34963517 18.17 5.49 130.04157185 51.18258601 56.00 -18.98 -0.49 5.70 1.5 6.1 4.4
87992 269.60730667 +36.87462906 -89.58 72.46 62.98053142 25.90148234 129.60 45.64 105.79 -4.00 -39.5 -15.8 56.7
115110 349.72322473 -28.74087144 48.86 -9.25 23.00447250 -69.52799804 116.87 -8.37 -49.02 15.00 -16.8 -12.2 -23.6
""".strip()
# in principle we could parse the above as a table, but doing it "manually"
# makes this test less tied to Table working correctly
@pytest.mark.parametrize(
"hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W",
[[float(val) for val in row.split()] for row in _xhip_data.split("\n")],
)
def test_xhip_galactic(
hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W
):
i = ICRS(
ra * u.deg,
dec * u.deg,
dist * u.pc,
pm_ra_cosdec=pmra * u.marcsec / u.yr,
pm_dec=pmdec * u.marcsec / u.yr,
radial_velocity=rv * u.km / u.s,
)
g = i.transform_to(Galactic())
# precision is limited by the 2-decimal-digit string representation of pms
assert quantity_allclose(
g.pm_l_cosb, pmglon * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
assert quantity_allclose(
g.pm_b, pmglat * u.marcsec / u.yr, atol=0.01 * u.marcsec / u.yr
)
# make sure UVW also makes sense
uvwg = g.cartesian.differentials["s"]
# precision is limited by 1-decimal digit string representation of vels
assert quantity_allclose(uvwg.d_x, U * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_y, V * u.km / u.s, atol=0.1 * u.km / u.s)
assert quantity_allclose(uvwg.d_z, W * u.km / u.s, atol=0.1 * u.km / u.s)
@pytest.mark.parametrize(
"kwargs,expect_success",
(
(POSITION_ON_SKY, False),
({**POSITION_ON_SKY, **DISTANCE}, True),
({**POSITION_ON_SKY, **PROPER_MOTION}, False),
({**POSITION_ON_SKY, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **DISTANCE, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY}, False),
({**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY}, True),
),
)
def test_frame_affinetransform(kwargs, expect_success):
"""There are already tests in test_transformations.py that check that
an AffineTransform fails without full-space data, but this just checks that
things work as expected at the frame level as well.
"""
with galactocentric_frame_defaults.set("latest"):
icrs = ICRS(**kwargs)
if expect_success:
_ = icrs.transform_to(Galactocentric())
else:
with pytest.raises(ConvertError):
icrs.transform_to(Galactocentric())
def test_differential_type_arg():
"""
Test passing in an explicit differential class to the initializer or
changing the differential class via set_representation_cls
"""
icrs = ICRS(
**POSITION_ON_SKY,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type=r.UnitSphericalDifferential,
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
**POSITION_ON_SKY,
pm_ra=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
differential_type={"s": r.UnitSphericalDifferential},
)
assert icrs.pm_ra == 10 * u.mas / u.yr
icrs = ICRS(
ra=1 * u.deg,
dec=60 * u.deg,
pm_ra_cosdec=10 * u.mas / u.yr,
pm_dec=-11 * u.mas / u.yr,
)
icrs.set_representation_cls(s=r.UnitSphericalDifferential)
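# with dec = 60 deg, pm_ra = pm_ra_cosdec / cos(dec) = 10 / 0.5 = 20 mas/yr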
assert quantity_allclose(icrs.pm_ra, 20 * u.mas / u.yr)
# incompatible representation and differential
with pytest.raises(TypeError):
ICRS(**POSITION_ON_SKY, **CARTESIAN_VELOCITY)
# specify both
icrs = ICRS(**CARTESIAN_POSITION, **CARTESIAN_VELOCITY)
assert icrs.x == 1 * u.pc
assert icrs.y == 2 * u.pc
assert icrs.z == 3 * u.pc
assert icrs.v_x == 1 * u.km / u.s
assert icrs.v_y == 2 * u.km / u.s
assert icrs.v_z == 3 * u.km / u.s
def test_slicing_preserves_differential():
icrs = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
icrs2 = icrs.reshape(1, 1)[:1, 0]
for name in icrs.representation_component_names.keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
for name in icrs.get_representation_component_names("s").keys():
assert getattr(icrs, name) == getattr(icrs2, name)[0]
def test_shorthand_attributes():
# Check that attribute access works
# for array data:
n = 4
icrs1 = ICRS(
ra=np.random.uniform(0, 360, n) * u.deg,
dec=np.random.uniform(-90, 90, n) * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=np.random.normal(0, 100, n) * u.mas / u.yr,
pm_dec=np.random.normal(0, 100, n) * u.mas / u.yr,
radial_velocity=np.random.normal(0, 100, n) * u.km / u.s,
)
v = icrs1.velocity
pm = icrs1.proper_motion
assert quantity_allclose(pm[0], icrs1.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs1.pm_dec)
# for scalar data:
icrs2 = ICRS(**POSITION_ON_SKY, **DISTANCE, **PROPER_MOTION, **RADIAL_VELOCITY)
v = icrs2.velocity
pm = icrs2.proper_motion
assert quantity_allclose(pm[0], icrs2.pm_ra_cosdec)
assert quantity_allclose(pm[1], icrs2.pm_dec)
# check that it fails where we expect:
# no distance
icrs3 = ICRS(**POSITION_ON_SKY, **PROPER_MOTION, **RADIAL_VELOCITY)
with pytest.raises(ValueError):
icrs3.velocity
icrs3.set_representation_cls("cartesian")
assert hasattr(icrs3, "radial_velocity")
assert quantity_allclose(icrs3.radial_velocity, 105.7 * u.km / u.s)
icrs4 = ICRS(**CARTESIAN_POSITION, **CARTESIAN_VELOCITY)
icrs4.radial_velocity
@pytest.mark.parametrize(
"icrs_coords", [POSITION_ON_SKY, {**POSITION_ON_SKY, **PROPER_MOTION}]
)
def test_negative_distance(icrs_coords):
"""Regression test: #7408
Make sure that negative parallaxes turned into distances are handled right
"""
c = ICRS(distance=(-10 * u.mas).to(u.pc, u.parallax()), **icrs_coords)
assert quantity_allclose(c.ra, 37.4 * u.deg)
assert quantity_allclose(c.dec, -55.8 * u.deg)
def test_velocity_units():
"""Check that the differential data given has compatible units
with the time-derivative of representation data"""
with pytest.raises(
ValueError,
match=(
'^x has unit "pc" with physical type "length", but v_x has incompatible'
' unit "" with physical type "dimensionless" instead of the expected'
r' "speed/velocity".$'
),
):
ICRS(**CARTESIAN_POSITION, v_x=1, v_y=2, v_z=3, differential_type="cartesian")
def test_frame_with_velocity_without_distance_can_be_transformed():
rep = CIRS(**POSITION_ON_SKY, **PROPER_MOTION).transform_to(ICRS())
assert "<ICRS Coordinate: (ra, dec, distance) in" in repr(rep)
|
d75f8b1c700c523c9d698e8d2c4bd2b576df555dd3f53f592e7ed145209fecf8 | import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy.coordinates.polarization import (
StokesCoord,
StokesSymbol,
custom_stokes_symbol_mapping,
)
from astropy.table import Table, vstack
from astropy.utils import unbroadcast
def test_scalar():
sk = StokesCoord(2)
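# In the FITS convention, values 1, 2, 3, 4 map to Stokes I, Q, U, V, so 2 is "Q"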
assert repr(sk) == "StokesCoord('Q')" == str(sk)
assert sk.value == 2.0
assert sk.symbol == "Q"
def test_vector():
# This also checks that float values are rounded when converting
# to symbol strings
values = [1.2, 1.8, 2.0, 2.2, 2.8]
sk = StokesCoord(values)
assert_equal(sk.value, values)
assert_equal(sk.symbol, np.array(["I", "Q", "Q", "Q", "U"]))
assert repr(sk) == "StokesCoord(['I', 'Q', 'Q', 'Q', 'U'])" == str(sk)
def test_vector_list_init():
sk = StokesCoord(["I", "Q", "Q", "U", "U"])
assert repr(sk) == "StokesCoord(['I', 'Q', 'Q', 'U', 'U'])" == str(sk)
assert_equal(sk.symbol, np.array(["I", "Q", "Q", "U", "U"]))
def test_undefined():
sk = StokesCoord(np.arange(-10, 7))
assert_equal(
sk.symbol,
# fmt: off
np.array(["?", "?", "YX", "XY", "YY", "XX", "LR", "RL",
"LL", "RR", "?", "I", "Q", "U", "V", "?", "?"]),
# fmt: on
)
def test_undefined_init():
with pytest.raises(Exception, match="Unknown stokes symbols.*Spam"):
StokesCoord("Spam")
def test_custom_symbol_mapping():
custom_mapping = {
10000: StokesSymbol("A"),
10001: StokesSymbol("B"),
10002: StokesSymbol("C"),
10003: StokesSymbol("D"),
}
# Check that we can supply a custom mapping
with custom_stokes_symbol_mapping(custom_mapping):
values = [0.6, 1.7, 10000.1, 10002.4]
sk1 = StokesCoord(values)
assert repr(sk1) == "StokesCoord(['I', 'Q', 'A', 'C'])" == str(sk1)
assert_equal(sk1.value, values)
assert_equal(sk1.symbol, np.array(["I", "Q", "A", "C"]))
# Check that the mapping is not active outside the context manager
assert_equal(sk1.symbol, np.array(["I", "Q", "?", "?"]))
# Also not for new StokesCoords
sk2 = StokesCoord(values)
assert_equal(sk2.symbol, np.array(["I", "Q", "?", "?"]))
def test_custom_symbol_mapping_overlap():
# Make a custom mapping that overlaps with some of the existing values
custom_mapping = {
3: StokesSymbol("A"),
4: StokesSymbol("B"),
5: StokesSymbol("C"),
6: StokesSymbol("D"),
}
with custom_stokes_symbol_mapping(custom_mapping):
sk = StokesCoord(np.arange(1, 7))
assert_equal(sk.symbol, np.array(["I", "Q", "A", "B", "C", "D"]))
def test_custom_symbol_mapping_replace():
# Check that we can replace the mapping completely
custom_mapping = {
3: StokesSymbol("A"),
4: StokesSymbol("B"),
5: StokesSymbol("C"),
6: StokesSymbol("D"),
}
with custom_stokes_symbol_mapping(custom_mapping, replace=True):
sk = StokesCoord(np.arange(1, 7))
assert_equal(sk.symbol, np.array(["?", "?", "A", "B", "C", "D"]))
def test_comparison_scalar():
sk = StokesCoord(np.arange(1, 6))
assert_equal("Q" == sk, [False, True, False, False, False])
assert_equal(sk == 1, [True, False, False, False, False])
assert_equal(sk == "Q", [False, True, False, False, False])
def test_comparison_vector():
sk = StokesCoord(np.arange(1, 6))
assert_equal(
sk == np.array(["I", "Q", "I", "I", "Q"]), [True, True, False, False, False]
)
def test_comparison_other_coord():
sk1 = StokesCoord(np.arange(1, 6))
sk2 = StokesCoord("I")
assert_equal(sk1 == sk2, [True, False, False, False, False])
sk3 = StokesCoord(np.repeat(2, 5))
assert_equal(sk1 == sk3, [False, True, False, False, False])
def test_efficient():
# Make sure that if we pass a broadcasted array in we get a broadcasted
# array of symbols.
values = np.broadcast_to(np.arange(1, 5, dtype=float), (512, 256, 4))
sk = StokesCoord(values, copy=False)
assert sk.symbol.shape == (512, 256, 4)
assert unbroadcast(sk.value).shape == (4,)
assert unbroadcast(sk.symbol).shape == (4,)
assert_equal(unbroadcast(sk.symbol), np.array(["I", "Q", "U", "V"]))
def test_broadcast_to():
sk = StokesCoord(np.arange(1, 5, dtype=int), copy=False)
sk2 = np.broadcast_to(sk, (512, 256, 4))
assert sk2.symbol.shape == (512, 256, 4)
assert unbroadcast(sk2.value).shape == (4,)
assert unbroadcast(sk2.symbol).shape == (4,)
assert_equal(unbroadcast(sk.symbol), np.array(["I", "Q", "U", "V"]))
def test_table_vstack_stokes():
sk = StokesCoord(np.arange(1, 5, dtype=int), copy=False)
tt = Table([sk])
assert isinstance(tt["col0"], StokesCoord)
assert np.allclose(tt["col0"].value, np.arange(1, 5, dtype=int))
sk2 = StokesCoord([1, 2, 2, 2, 4, 5])
tt2 = Table([sk2])
assert isinstance(tt2["col0"], StokesCoord)
assert np.allclose(tt2["col0"].value, sk2.value)
tt3 = vstack([tt, tt2])
assert isinstance(tt3["col0"], StokesCoord)
assert len(tt3) == 10
assert np.allclose(tt3["col0"].value, np.array([1, 2, 3, 4, 1, 2, 2, 2, 4, 5]))
def test_init_copy():
input = np.arange(1, 5, dtype=int)
sk1 = StokesCoord(input, copy=False)
assert sk1._data is input
skc = StokesCoord(input, copy=True)
assert skc._data is not input
sk2 = StokesCoord(sk1)
assert sk1._data is sk2._data
sk3 = StokesCoord(sk1, copy=True)
assert sk1._data is not sk3._data
assert np.allclose(sk1._data, sk3._data)
def test_init_error():
with pytest.raises(ValueError, match="object array"):
StokesCoord(None)
|
541c4207092c8210a1ea279675619002cbfd9ab23a8c08cd0110fbc86f8e5438 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains tests for the name resolve convenience module.
"""
import time
import urllib.request
import numpy as np
import pytest
from pytest_remotedata.disable_internet import no_internet
from astropy import units as u
from astropy.config import paths
from astropy.coordinates.name_resolve import (
NameResolveError,
_parse_response,
get_icrs_coordinates,
sesame_database,
sesame_url,
)
from astropy.coordinates.sky_coordinate import SkyCoord
_cached_ngc3642 = dict()
_cached_ngc3642[
"simbad"
] = """# NGC 3642 #Q22523669
#=S=Simbad (via url): 1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140
#====Done (2013-Feb-12,16:37:11z)===="""
_cached_ngc3642[
"vizier"
] = """# NGC 3642 #Q22523677
#=V=VizieR (local): 1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642
#====Done (2013-Feb-12,16:37:42z)===="""
_cached_ngc3642[
"all"
] = """# ngc3642 #Q22523722
#=S=Simbad (via url): 1
%@ 503952
%I.0 NGC 3642
%C.0 LIN
%C.N0 15.15.01.00
%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
%T 5 =32800000 D 2011A&A...532A..74B
%#B 140
#=V=VizieR (local): 1
%J 170.56 +59.08 = 11:22.2 +59:05
%I.0 {NGC} 3642
#!N=NED : *** Could not access the server ***
#====Done (2013-Feb-12,16:39:48z)===="""
_cached_castor = dict()
_cached_castor[
"all"
] = """# castor #Q22524249
#=S=Simbad (via url): 1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179
#!V=VizieR (local): No table found for: castor
#!N=NED: ****object name not recognized by NED name interpreter
#!N=NED: ***Not recognized by NED: castor
#====Done (2013-Feb-12,16:52:02z)===="""
_cached_castor[
"simbad"
] = """# castor #Q22524495
#=S=Simbad (via url): 1
%@ 983633
%I.0 NAME CASTOR
%C.0 **
%C.N0 12.13.00.00
%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
%J.E [34.72 25.95 0] A 2007A&A...474..653V
%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
%X 64.12 [3.75] A 2007A&A...474..653V
%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
%#B 179
#====Done (2013-Feb-12,17:00:39z)===="""
@pytest.mark.remote_data
def test_names():
# First check that sesame is up
if (
urllib.request.urlopen("https://cdsweb.unistra.fr/cgi-bin/nph-sesame").getcode()
!= 200
):
pytest.skip(
"SESAME appears to be down, skipping test_name_resolve.py:test_names()..."
)
with pytest.raises(NameResolveError):
get_icrs_coordinates("m87h34hhh")
try:
icrs = get_icrs_coordinates("NGC 3642")
except NameResolveError:
ra, dec = _parse_response(_cached_ngc3642["all"])
icrs = SkyCoord(ra=float(ra) * u.degree, dec=float(dec) * u.degree)
icrs_true = SkyCoord(ra="11h 22m 18.014s", dec="59d 04m 27.27s")
# use precision of only 1 decimal here and below because the result can
# change due to Sesame server-side changes.
np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
try:
icrs = get_icrs_coordinates("castor")
except NameResolveError:
ra, dec = _parse_response(_cached_castor["all"])
icrs = SkyCoord(ra=float(ra) * u.degree, dec=float(dec) * u.degree)
icrs_true = SkyCoord(ra="07h 34m 35.87s", dec="+31d 53m 17.8s")
np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
@pytest.mark.remote_data
def test_name_resolve_cache(tmp_path):
from astropy.utils.data import get_cached_urls
target_name = "castor"
(temp_cache_dir := tmp_path / "cache").mkdir()
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert len(get_cached_urls()) == 0
icrs1 = get_icrs_coordinates(target_name, cache=True)
urls = get_cached_urls()
assert len(urls) == 1
expected_urls = sesame_url.get()
assert any(
urls[0].startswith(x) for x in expected_urls
), f"{urls[0]} not in {expected_urls}"
# Try reloading coordinates, now should just reload cached data:
with no_internet():
icrs2 = get_icrs_coordinates(target_name, cache=True)
assert len(get_cached_urls()) == 1
assert u.allclose(icrs1.ra, icrs2.ra)
assert u.allclose(icrs1.dec, icrs2.dec)
def test_names_parse():
# a few test cases for parsing embedded coordinates from object name
test_names = [
"CRTS SSS100805 J194428-420209",
"MASTER OT J061451.7-272535.5",
"2MASS J06495091-0737408",
"1RXS J042555.8-194534",
"SDSS J132411.57+032050.5",
"DENIS-P J203137.5-000511",
"2QZ J142438.9-022739",
"CXOU J141312.3-652013",
]
for name in test_names:
sc = get_icrs_coordinates(name, parse=True)
@pytest.mark.remote_data
@pytest.mark.parametrize(
("name", "db_dict"), [("NGC 3642", _cached_ngc3642), ("castor", _cached_castor)]
)
def test_database_specify(name, db_dict):
# First check that at least some sesame mirror is up
for url in sesame_url.get():
if urllib.request.urlopen(url).getcode() == 200:
break
else:
pytest.skip(
"All SESAME mirrors appear to be down, skipping "
"test_name_resolve.py:test_database_specify()..."
)
for db in db_dict.keys():
with sesame_database.set(db):
icrs = SkyCoord.from_name(name)
time.sleep(1)
|
0f8a5af055089d25edf0c89a0d02f0c53cd64a59c78015e70695a1efdff2069b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
ICRS,
CartesianDifferential,
CartesianRepresentation,
Galactic,
PrecessedGeocentric,
RadialDifferential,
SkyCoord,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_creation_frameobjs():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
for attrnm in ["ra", "dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
frame="fk5",
)
assert_quantity_allclose(sc1.ra, 1 * u.deg)
assert_quantity_allclose(sc1.dec, 2 * u.deg)
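# 1 mas/yr is exactly 1 arcsec/kyr, so these compare the same proper motions in different units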
assert_quantity_allclose(sc1.pm_ra_cosdec, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc1.pm_dec, 0.1 * u.arcsec / u.kyr)
sc2 = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
assert_quantity_allclose(sc2.ra, 1 * u.deg)
assert_quantity_allclose(sc2.dec, 2 * u.deg)
assert_quantity_allclose(sc2.pm_ra, 0.2 * u.arcsec / u.kyr)
assert_quantity_allclose(sc2.pm_dec, 0.1 * u.arcsec / u.kyr)
sc3 = SkyCoord(
"1:2:3 4:5:6",
pm_ra_cosdec=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
unit=(u.hour, u.deg),
)
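# "1:2:3" is in hours: one minute of time is 15 arcmin and one second of time is 15 arcsec on the sky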
assert_quantity_allclose(
sc3.ra, 1 * u.hourangle + 2 * u.arcmin * 15 + 3 * u.arcsec * 15
)
assert_quantity_allclose(sc3.dec, 4 * u.deg + 5 * u.arcmin + 6 * u.arcsec)
# might as well check with sillier units?
assert_quantity_allclose(
sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight
)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
def test_creation_copy_basic():
i = ICRS(
1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra_cosdec", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(
1 * u.deg,
2 * u.deg,
pm_ra=0.2 * u.mas / u.yr,
pm_dec=0.1 * u.mas / u.yr,
differential_type=SphericalDifferential,
)
sc_cpy = SkyCoord(sc)
for attrnm in ["ra", "dec", "pm_ra", "pm_dec"]:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(
sc_newdiff.pm_ra_cosdec, reprepr.differentials["s"].d_lon_coslat
)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0.0, 0.0] * u.pc)
dif = CartesianDifferential([0, 100, 0.0] * u.pc / u.Myr)
rep = rep.with_differentials(dif)
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(
scope="module", params=[(False, False), (True, False), (False, True), (True, True)]
)
def sc(request):
incldist, inclrv = request.param
args = [1 * u.deg, 2 * u.deg]
kwargs = dict(pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr)
if incldist:
kwargs["distance"] = 213.4 * u.pc
if inclrv:
kwargs["radial_velocity"] = 61 * u.km / u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(
ICRS(
ra=[1] * 100 * u.deg,
dec=[2] * 100 * u.deg,
pm_ra_cosdec=np.random.randn(100) * u.mas / u.yr,
pm_dec=np.random.randn(100) * u.mas / u.yr,
)
)
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(
1 * u.deg, 2 * u.deg, pm_dec=1 * u.mas / u.yr, pm_ra_cosdec=2 * u.mas / u.yr
)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials["s"]
sph = sc.spherical
gal = sc.galactic
if sc.data.get_name().startswith("unit") and not sc.data.differentials[
"s"
].get_name().startswith("unit"):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail(".velocity fails if there is an RV but not distance")
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to("galactic")
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail("Should fail for no-distance cases")
else:
trans = sc.transform_to(PrecessedGeocentric(equinox="B1975"))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == "Pisces"
def test_separation_3d_with_differentials():
c1 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=100 * u.pc,
pm_ra_cosdec=5 * u.mas / u.yr,
pm_dec=-7 * u.mas / u.yr,
radial_velocity=160 * u.km / u.s,
)
c2 = SkyCoord(
ra=138 * u.deg,
dec=-17 * u.deg,
distance=105 * u.pc,
pm_ra_cosdec=15 * u.mas / u.yr,
pm_dec=-74 * u.mas / u.yr,
radial_velocity=-60 * u.km / u.s,
)
sep = c1.separation_3d(c2)
assert_quantity_allclose(sep, 5 * u.pc)
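    # (Illustrative note, not part of the original assertions: the two
    # coordinates share the same sky direction, so the 3D separation is simply
    # the 105 - 100 = 5 pc difference in distance; the differing proper motions
    # and radial velocities do not enter the positional separation.)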
@pytest.mark.parametrize("sph_type", ["spherical", "unitspherical"])
def test_cartesian_to_spherical(sph_type):
"""Conversion to unitspherical should work, even if we lose distance."""
c = SkyCoord(
x=1 * u.kpc,
y=0 * u.kpc,
z=0 * u.kpc,
v_x=10 * u.km / u.s,
v_y=0 * u.km / u.s,
v_z=4.74 * u.km / u.s,
representation_type="cartesian",
)
c.representation_type = sph_type
assert c.ra == 0
assert c.dec == 0
assert c.pm_ra == 0
assert u.allclose(c.pm_dec, 1 * (u.mas / u.yr), rtol=1e-3)
assert u.allclose(c.radial_velocity, 10 * (u.km / u.s))
if sph_type == "spherical":
assert u.allclose(c.distance, 1 * u.kpc)
else:
assert not hasattr(c, "distance")
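# Illustrative cross-check of the numbers used in test_cartesian_to_spherical
# above (an added sketch, assumed safe to evaluate at import time): a proper
# motion of 1 mas/yr at a distance of 1 kpc corresponds to a tangential
# velocity of about 4.74 km/s, which is why v_z = 4.74 km/s maps onto
# pm_dec ~ 1 mas/yr.
assert_quantity_allclose(
    (1 * u.mas / u.yr * u.kpc).to(u.km / u.s, u.dimensionless_angles()),
    4.74 * u.km / u.s,
    rtol=1e-3,
)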
@pytest.mark.parametrize(
"diff_info, diff_cls",
[
(dict(radial_velocity=[20, 30] * u.km / u.s), RadialDifferential),
(
dict(
pm_ra=[2, 3] * u.mas / u.yr,
pm_dec=[-3, -4] * u.mas / u.yr,
differential_type="unitspherical",
),
UnitSphericalDifferential,
),
(
dict(pm_ra_cosdec=[2, 3] * u.mas / u.yr, pm_dec=[-3, -4] * u.mas / u.yr),
UnitSphericalCosLatDifferential,
),
],
scope="class",
)
class TestDifferentialClassPropagation:
"""Test that going in between spherical and unit-spherical, we do not
change differential type (since both can handle the same types).
"""
def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20] * u.deg, dec=[-10, 10] * u.deg, **diff_info)
assert isinstance(sc.data, UnitSphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("spherical")
assert isinstance(sr, SphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(
ra=[10, 20] * u.deg,
dec=[-10, 10] * u.deg,
distance=1.0 * u.kpc,
**diff_info
)
assert isinstance(sc.data, SphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("unitspherical")
assert isinstance(sr, UnitSphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
|
199249afee44f921062a41ad3917ba286f54a0282e5ddaa57a8d63c74cdaa254 | from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy import time
from astropy.constants import c
from astropy.coordinates import (
FK5,
GCRS,
ICRS,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
Galactic,
SkyCoord,
SpectralQuantity,
get_body_barycentric_posvel,
)
from astropy.coordinates.sites import get_builtin_sites
from astropy.coordinates.spectral_coordinate import (
SpectralCoord,
_apply_relativistic_doppler_shift,
)
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES
GREENWICH = get_builtin_sites()["greenwich"]
def assert_frame_allclose(
frame1,
frame2,
pos_rtol=1e-7,
pos_atol=1 * u.m,
vel_rtol=1e-7,
vel_atol=1 * u.mm / u.s,
):
# checks that:
# - the positions are equal to within some tolerance (the relative tolerance
# should be dimensionless, the absolute tolerance should be a distance).
# note that these are the tolerances *in 3d*
    # - either both or neither frame has velocities; if only one frame has
    #   velocities, they must all be zero
# - if velocities are present, they are equal to some tolerance
# Ideally this should accept both frames and SkyCoords
if hasattr(frame1, "frame"): # SkyCoord-like
frame1 = frame1.frame
if hasattr(frame2, "frame"): # SkyCoord-like
frame2 = frame2.frame
# assert (frame1.data.differentials and frame2.data.differentials or
# (not frame1.data.differentials and not frame2.data.differentials))
assert frame1.is_equivalent_frame(frame2)
frame2_in_1 = frame2.transform_to(frame1)
assert_quantity_allclose(
0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol
)
if frame1.data.differentials:
d1 = frame1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
d2 = frame2_in_1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
        assert_quantity_allclose(d1.norm(), d2.norm(), rtol=vel_rtol, atol=vel_atol)
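# Illustrative usage of the helper above (an added sketch, not part of the
# original test suite; assumed cheap enough to evaluate at import time): two
# identical ICRS positions with a distance compare equal in position, and the
# velocity check is skipped because neither frame carries differentials.
assert_frame_allclose(
    ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.pc),
    ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.pc),
)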
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralCoord, including for example frames,
# SkyCoords, and making sure that SpectralCoord is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(
u=0.1 * u.km,
v=0.1 * u.km,
w=0.1 * u.km,
U=9 * u.km / u.s,
V=12 * u.km / u.s,
W=7 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS()), # different frame
LSRD.transform_to(ICRS()).transform_to(Galactic()), # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(
u=9 * u.km, v=12 * u.km, w=7 * u.km, representation_type="cartesian"
)
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
# different representation
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()),
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_spectral_coord_observer_target(observer, target):
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target)
if observer is None:
assert coord.observer is None
else:
assert_frame_allclose(observer, coord.observer)
if target is None:
assert coord.target is None
else:
assert_frame_allclose(target, coord.target)
assert coord.doppler_rest is None
assert coord.doppler_convention is None
if observer is None or target is None:
assert quantity_allclose(coord.redshift, 0)
assert quantity_allclose(coord.radial_velocity, 0 * u.km / u.s)
elif any(observer is lsrd for lsrd in LSRD_EQUIV) and any(
target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV
):
assert_quantity_allclose(
coord.radial_velocity, -(274**0.5) * u.km / u.s, atol=1e-4 * u.km / u.s
)
assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9)
else:
raise NotImplementedError()
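# Where the -(274 ** 0.5) km/s expected above comes from (illustrative added
# check): the LSRD observer moves with (U, V, W) = (9, 12, 7) km/s and the
# stationary target lies along that same direction, so the full speed is along
# the line of sight and the target is approached at sqrt(9**2 + 12**2 + 7**2)
# km/s.
assert np.isclose(np.sqrt(9**2 + 12**2 + 7**2), 274**0.5)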
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralCoord object
"""
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
spec_coord1 = SpectralCoord(
[100, 200, 300] * u.nm,
observer=observer,
target=target,
doppler_convention="optical",
doppler_rest=6000 * u.AA,
)
spec_coord2 = SpectralCoord(spec_coord1)
assert spec_coord1.observer == spec_coord2.observer
assert spec_coord1.target == spec_coord2.target
assert spec_coord1.radial_velocity == spec_coord2.radial_velocity
assert spec_coord1.doppler_convention == spec_coord2.doppler_convention
assert spec_coord1.doppler_rest == spec_coord2.doppler_rest
# INTERNAL FUNCTIONS TESTS
def test_apply_relativistic_doppler_shift():
# Frequency
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c)
assert_quantity_allclose(sq2, np.sqrt(1.0 / 3.0) * u.GHz)
# Wavelength
sq3 = SpectralQuantity(500 * u.nm)
sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c)
assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm)
# Energy
sq5 = SpectralQuantity(300 * u.eV)
sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c)
assert_quantity_allclose(sq6, np.sqrt(1.0 / 3.0) * 300 * u.eV)
# Wavenumber
sq7 = SpectralQuantity(0.01 / u.micron)
sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c)
assert_quantity_allclose(sq8, np.sqrt(1.0 / 3.0) * 0.01 / u.micron)
# Velocity (doppler_convention='relativistic')
sq9 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s)
assert_quantity_allclose(sq10, 499.999666 * u.km / u.s)
assert sq10.doppler_convention == "relativistic"
    # Velocity (doppler_convention='radio')
sq11 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="radio", doppler_rest=1 * u.GHz
)
sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s)
assert_quantity_allclose(sq12, 499.650008 * u.km / u.s)
assert sq12.doppler_convention == "radio"
    # Velocity (doppler_convention='optical')
sq13 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="optical", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s)
assert_quantity_allclose(sq14, 500.350493 * u.km / u.s)
assert sq14.doppler_convention == "optical"
# Velocity - check relativistic velocity addition
sq13 = SpectralQuantity(
0 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c)
assert_quantity_allclose(sq14, 0.999 * c)
sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c)
assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c)
assert sq14.doppler_convention == "relativistic"
# Cases that should raise errors
sq15 = SpectralQuantity(200 * u.km / u.s)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s)
sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s)
sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention="optical")
with pytest.raises(ValueError, match="doppler_rest not set"):
_apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s)
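# The expected values above follow from the relativistic Doppler factor (an
# illustrative, added arithmetic check; the helper name is arbitrary): for a
# recession speed beta = v/c the observed frequency scales by
# sqrt((1 - beta) / (1 + beta)) and the wavelength by its reciprocal, so
# beta = 0.5 sends 1 GHz to sqrt(1/3) GHz and 500 nm to sqrt(3) * 500 nm.
_beta_check = 0.5
assert np.isclose(np.sqrt((1 - _beta_check) / (1 + _beta_check)), np.sqrt(1 / 3))
assert np.isclose(np.sqrt((1 + _beta_check) / (1 - _beta_check)), np.sqrt(3))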
# BASIC TESTS
def test_init_quantity():
sc = SpectralCoord(10 * u.GHz)
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention is None
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_spectral_quantity():
sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention="optical"))
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention == "optical"
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_too_many_args():
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz,
observer=LSRD,
target=SkyCoord(10, 20, unit="deg"),
radial_velocity=1 * u.km / u.s,
)
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit="deg"), redshift=1
)
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1)
def test_init_wrong_type():
with pytest.raises(
TypeError, match="observer must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, observer=3.4)
with pytest.raises(
TypeError, match="target must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, target=3.4)
with pytest.raises(
u.UnitsError,
match=(
"Argument 'radial_velocity' to function "
"'__new__' must be in units convertible to 'km / s'"
),
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg)
with pytest.raises(
TypeError,
match=(
"Argument 'radial_velocity' to function '__new__' has no 'unit' attribute."
" You should pass in an astropy Quantity instead."
),
):
SpectralCoord(10 * u.GHz, radial_velocity="banana")
with pytest.raises(u.UnitsError, match="redshift should be dimensionless"):
SpectralCoord(10 * u.GHz, redshift=1 * u.m)
with pytest.raises(
TypeError,
match='Cannot parse "banana" as a Quantity. It does not start with a number.',
):
SpectralCoord(10 * u.GHz, redshift="banana")
def test_observer_init_rv_behavior():
"""
    Test basic initialization behavior of observer/target and redshift/rv
"""
# Start off by specifying the radial velocity only
sc_init = SpectralCoord([4000, 5000] * u.AA, radial_velocity=100 * u.km / u.s)
assert sc_init.observer is None
assert sc_init.target is None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Next, set the observer, and check that the radial velocity hasn't changed
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init.observer = ICRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
assert sc_init.observer is not None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Setting the target should now cause the original radial velocity to be
# dropped in favor of the automatically computed one
sc_init.target = SkyCoord(
CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]),
frame="icrs",
radial_velocity=30 * u.km / u.s,
)
assert sc_init.target is not None
assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s)
# The observer can only be set if originally None - now that it isn't
# setting it again should fail
with pytest.raises(ValueError, match="observer has already been set"):
sc_init.observer = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
# And similarly, changing the target should not be possible
with pytest.raises(ValueError, match="target has already been set"):
sc_init.target = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
def test_rv_redshift_initialization():
# Check that setting the redshift sets the radial velocity appropriately,
# and that the redshift can be recovered
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=1)
assert isinstance(sc_init.redshift, u.Quantity)
assert_quantity_allclose(sc_init.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c)
# Check that setting the same radial velocity produces the same redshift
# and that the radial velocity can be recovered
sc_init2 = SpectralCoord([4000, 5000] * u.AA, radial_velocity=0.6 * c)
assert_quantity_allclose(sc_init2.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c)
# Check that specifying redshift as a quantity works
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1 * u.one)
assert sc_init.redshift == sc_init3.redshift
# Make sure that both redshift and radial velocity can't be specified at
# the same time.
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord([4000, 5000] * u.AA, radial_velocity=10 * u.km / u.s, redshift=2)
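# Relation behind the numbers above (an illustrative, added check; the loop
# variable names are arbitrary): the relativistic mapping between redshift and
# velocity is 1 + z = sqrt((1 + beta) / (1 - beta)) with beta = v/c, so z = 1
# corresponds to v = 0.6 c, and beta = 0.5 gives z = sqrt(3) - 1, the value
# checked in test_replicate below.
for _z_chk, _beta_chk in [(1.0, 0.6), (np.sqrt(3) - 1, 0.5)]:
    assert np.isclose(1 + _z_chk, np.sqrt((1 + _beta_chk) / (1 - _beta_chk)))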
def test_replicate():
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_set_rv = sc_init.replicate(redshift=1)
assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
sc_set_rv = sc_init.replicate(radial_velocity=c / 2)
assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init2 = SpectralCoord([4000, 5000] * u.AA, redshift=1, observer=gcrs_origin)
with np.errstate(all="ignore"):
sc_init2.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1, target=gcrs_origin)
with np.errstate(all="ignore"):
sc_init3.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init4 = SpectralCoord(
[4000, 5000] * u.AA, observer=gcrs_origin, target=gcrs_origin
)
with pytest.raises(
ValueError,
match=(
"Cannot specify radial velocity or redshift if both target and observer are"
" specified"
),
):
sc_init4.replicate(redshift=0.5)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_copy = sc_init.replicate(copy=True)
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_ref = sc_init.replicate()
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA)
def test_with_observer_stationary_relative_to():
# Simple tests of with_observer_stationary_relative_to to cover different
# ways of calling it
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc1 = SpectralCoord([4000, 5000] * u.AA)
with pytest.raises(
ValueError,
match=(
"This method can only be used if both observer and target are defined on"
" the SpectralCoord"
),
):
sc1.with_observer_stationary_relative_to("icrs")
sc2 = SpectralCoord(
[4000, 5000] * u.AA,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-1 * u.km / u.s,
0 * u.km / u.s,
-1 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s
),
)
# Motion of observer is in opposite direction to target
assert_quantity_allclose(sc2.radial_velocity, (2 + 2**0.5) * u.km / u.s)
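    # (Worked numbers, illustrative: the target at lon=0, lat=45 deg lies along
    # the unit vector (sqrt(2)/2, 0, sqrt(2)/2); the observer velocity
    # (-1, 0, -1) km/s projects onto that direction as -sqrt(2) km/s, so the
    # relative line-of-sight velocity is 2 - (-sqrt(2)) = 2 + sqrt(2) km/s.)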
# Change to observer that is stationary in ICRS
sc3 = sc2.with_observer_stationary_relative_to("icrs")
# Velocity difference is now pure radial velocity of target
assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s)
# Check setting the velocity in with_observer_stationary_relative_to
sc4 = sc2.with_observer_stationary_relative_to(
"icrs", velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
# Observer once again moving away from target but faster
assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s)
# Check that we can also pass frame classes instead of names
sc5 = sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s)
# And make sure we can also pass instances of classes without data
sc6 = sc2.with_observer_stationary_relative_to(
ICRS(), velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s)
# And with data provided no velocities are present
sc7 = sc2.with_observer_stationary_relative_to(
ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian"),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s)
# And also have the ability to pass frames with velocities already defined
sc8 = sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
assert_quantity_allclose(
sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Make sure that things work properly if passing a SkyCoord
sc9 = sc2.with_observer_stationary_relative_to(
SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian")),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s)
sc10 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
)
assert_quantity_allclose(
sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# But we shouldn't be able to pass both a frame with velocities, and explicit velocities
with pytest.raises(
ValueError,
match="frame already has differentials, cannot also specify velocity",
):
sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
# And velocities should have three elements
with pytest.raises(
ValueError, match="velocity should be a Quantity vector with 3 elements"
):
sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5), -3] * u.km / u.s
)
# Make sure things don't change depending on what frame class is used for reference
sc11 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
).transform_to(Galactic)
)
assert_quantity_allclose(
sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Check that it is possible to preserve the observer frame
sc12 = sc2.with_observer_stationary_relative_to(LSRD)
sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True)
assert isinstance(sc12.observer, Galactic)
assert isinstance(sc13.observer, ICRS)
def test_los_shift_radial_velocity():
# Tests to make sure that with_radial_velocity_shift correctly calculates
# the new radial velocity
# First check case where observer and/or target aren't specified
sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s)
sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s)
sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc4 = SpectralCoord(
500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin
)
sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s)
sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc7 = SpectralCoord(
500 * u.nm,
radial_velocity=1 * u.km / u.s,
target=ICRS(10 * u.deg, 20 * u.deg),
)
sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s)
sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s)
# Check that things still work when both observer and target are specified
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc10 = SpectralCoord(
500 * u.nm,
observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m),
target=ICRS(
10 * u.deg,
20 * u.deg,
radial_velocity=1 * u.km / u.s,
distance=10 * u.kpc,
),
)
sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s)
sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s)
# Check that things work if radial_velocity wasn't specified at all
sc13 = SpectralCoord(500 * u.nm)
sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s)
sc15 = sc1.with_radial_velocity_shift()
assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s)
# Check that units are verified
with pytest.raises(
u.UnitsError,
match=(
"Argument must have unit physical type 'speed' for radial velocty or "
"'dimensionless' for redshift."
),
):
sc1.with_radial_velocity_shift(target_shift=1 * u.kg)
@pytest.mark.xfail
def test_relativistic_radial_velocity():
# Test for when both observer and target have relativistic velocities.
# This is not yet supported, so the test is xfailed for now.
sc = SpectralCoord(
500 * u.nm,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-0.5 * c,
-0.5 * c,
-0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
1 * u.kpc,
1 * u.kpc,
1 * u.kpc,
0.5 * c,
0.5 * c,
0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
)
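    # (Illustrative: each body moves at sqrt(3) * 0.5 c ~ 0.866 c along the
    # observer-target line, and relativistic velocity addition gives
    # 2 * 0.866 / (1 + 0.866**2) ~ 0.98974, so the expected number below is
    # presumably meant as that fraction of c.)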
assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s)
# SCIENCE USE CASE TESTS
def test_spectral_coord_jupiter():
"""
Checks radial velocity between Earth and Jupiter
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
pos, vel = get_body_barycentric_posvel("jupiter", obstime)
jupiter = SkyCoord(
pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter)
    # The velocity should be less than ~43 km/s plus a bit extra, which is the
    # maximum possible Earth-Jupiter relative velocity. We check the exact
# value here (determined from SpectralCoord, so this serves as a test to
# check that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s)
def test_spectral_coord_alphacen():
"""
Checks radial velocity between Earth and Alpha Centauri
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# acen = SkyCoord.from_name('alpha cen')
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
radial_velocity=-18.0 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen)
    # The velocity should be less than ~18 + 30 km/s plus a bit extra, which is
    # the maximum possible relative velocity. We check the exact value here
# (determined from SpectralCoord, so this serves as a test to check that
# this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s)
def test_spectral_coord_m31():
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(
ra=10.6847 * u.deg,
dec=41.269 * u.deg,
distance=710 * u.kpc,
radial_velocity=-300 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
assert_allclose(spc.redshift, -0.0009327276702120191)
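# Consistency check of the two numbers above (illustrative, added sketch
# assumed safe to run at import time; the helper name is arbitrary): with
# beta = -279.755128 km/s / c, the relativistic relation
# z = sqrt((1 + beta) / (1 - beta)) - 1 reproduces the asserted redshift.
_beta_m31 = (-279.755128 * u.km / u.s / c).to_value(u.one)
assert np.isclose(
    np.sqrt((1 + _beta_m31) / (1 - _beta_m31)) - 1, -0.0009327276702120191
)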
def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563] * u.AA
observed_spc = SpectralCoord(rest_line_wls * (z + 1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
    # rest_spc = observed_spc.with_observer(observed_spc.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target)
def test_shift_to_rest_star_withobserver():
rv = -8.3283011 * u.km / u.s
rest_line_wls = [5007, 6563] * u.AA
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
)
# Note that above the rv is missing from the SkyCoord.
# That's intended, as it will instead be set in the `SpectralCoord`. But
# the SpectralCoord machinery should yield something comparable to test_
# spectral_coord_alphacen
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
observed_spc = SpectralCoord(
rest_line_wls * (rv / c + 1), observer=obs, target=acen
)
rest_spc = observed_spc.to_rest()
assert_quantity_allclose(rest_spc, rest_line_wls)
barycentric_spc = observed_spc.with_observer_stationary_relative_to("icrs")
baryrest_spc = barycentric_spc.to_rest()
assert quantity_allclose(baryrest_spc, rest_line_wls)
# now make sure the change the barycentric shift did is comparable to the
# offset rv_correction produces
# barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons
barytarg = SkyCoord(
barycentric_spc.target.data.without_differentials(),
frame=barycentric_spc.target.realize_frame(None),
)
vcorr = barytarg.radial_velocity_correction(
kind="barycentric", obstime=obstime, location=GREENWICH
)
drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity
# note this probably will not work on the first try, but it's ok if this is
# "good enough", where good enough is estimated below. But that could be
# adjusted if we think that's too aggressive of a precision target for what
# the machinery can handle
# with pytest.raises(AssertionError):
assert_quantity_allclose(vcorr, drv, atol=10 * u.m / u.s)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
gcrs_not_origin = GCRS(CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]))
@pytest.mark.parametrize(
"sc_kwargs",
[
dict(radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(target=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, target=gcrs_not_origin),
],
)
def test_los_shift(sc_kwargs):
wl = [4000, 5000] * u.AA
with nullcontext() if "observer" not in sc_kwargs and "target" not in sc_kwargs else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
sc_init = SpectralCoord(wl, **sc_kwargs)
# these should always work in *all* cases because it's unambiguous that
# a target shift should behave this way
new_sc1 = sc_init.with_radial_velocity_shift(0.1)
assert_quantity_allclose(new_sc1, wl * 1.1)
# interpret at redshift
new_sc2 = sc_init.with_radial_velocity_shift(0.1 * u.dimensionless_unscaled)
assert_quantity_allclose(new_sc1, new_sc2)
new_sc3 = sc_init.with_radial_velocity_shift(-100 * u.km / u.s)
assert_quantity_allclose(new_sc3, wl * (1 + (-100 * u.km / u.s / c)))
# now try the cases where observer is specified as well/instead
if sc_init.observer is None or sc_init.target is None:
with pytest.raises(ValueError):
# both must be specified if you're going to mess with observer
sc_init.with_radial_velocity_shift(observer_shift=0.1)
if sc_init.observer is not None and sc_init.target is not None:
        # redshifting the observer should *blueshift* the LOS velocity since
        # it's the observer-to-target vector that matters
new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=0.1)
assert_quantity_allclose(new_sc4, wl / 1.1)
# an equal shift in both should produce no offset at all
new_sc5 = sc_init.with_radial_velocity_shift(
target_shift=0.1, observer_shift=0.1
)
assert_quantity_allclose(new_sc5, wl)
def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time("2018-12-13 9:00")
dt = 12 * u.hour
time2 = time1 + dt
# make the silly but simplifying assumption that the asteroid is moving along
# the x-axis of GCRS, and makes a 10 earth-radius closest approach
v_ast = [5, 0, 0] * u.km / u.s
x1 = -v_ast[0] * dt / 2
x2 = v_ast[0] * dt / 2
z = 10 * u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(
CartesianRepresentation(x1.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time1,
)
asteroid_loc2 = GCRS(
CartesianRepresentation(x2.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time2,
)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(
CartesianRepresentation([0 * u.km, 35000 * u.km, 0 * u.km]), obstime=time1
)
observer2 = GCRS(
CartesianRepresentation([0 * u.km, -35000 * u.km, 0 * u.km]), obstime=time2
)
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0 * u.km / u.s
assert spec_coord1.radial_velocity > -5 * u.km / u.s
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0 * u.km / u.s
assert spec_coord2.radial_velocity < 5 * u.km / u.s
# now check the behavior of with_observer_stationary_relative_to: we shift each coord
# into the velocity frame of its *own* target. That would then be a
# spectralcoord that would allow direct physical comparison of the two
    # different spec_coords. There's no way to test that without
    # actual data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(
target_sc2.radial_velocity, 0 * u.km / u.s, atol=1e-7 * u.km / u.s
)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1 / (1 + spec_coord1.redshift))
# TODO: Figure out what is meant by the below use case
# ensure the "target-rest" use gives the same answer
# target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest')
# assert_quantity_allclose(target_sc1, target_sc1_alt)
def test_spectral_coord_from_sky_coord_without_distance():
# see https://github.com/astropy/specutils/issues/658 for issue context
obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type="cartesian")
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)
# coord.target = SkyCoord.from_name('m31') # <- original issue, but below is the same but requires no remote data access
with pytest.warns(
AstropyUserWarning, match="Distance on coordinate object is dimensionless"
):
coord.target = SkyCoord(ra=10.68470833 * u.deg, dec=41.26875 * u.deg)
EXPECTED_VELOCITY_FRAMES = {
"geocent": "gcrs",
"heliocent": "hcrs",
"lsrk": "lsrk",
"lsrd": "lsrd",
"galactoc": FITSWCS_VELOCITY_FRAMES["GALACTOC"],
"localgrp": FITSWCS_VELOCITY_FRAMES["LOCALGRP"],
}
@pytest.mark.parametrize("specsys", list(EXPECTED_VELOCITY_FRAMES))
@pytest.mark.slow
def test_spectralcoord_accuracy(specsys):
# This is a test to check the numerical results of transformations between
# different velocity frames in SpectralCoord. This compares the velocity
# shifts determined with SpectralCoord to those determined from the rv
# package in Starlink.
velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys]
reference_filename = get_pkg_data_filename("accuracy/data/rv.ecsv")
reference_table = Table.read(reference_filename, format="ascii.ecsv")
rest = 550 * u.nm
with iers.conf.set_temp("auto_download", False):
for row in reference_table:
observer = EarthLocation.from_geodetic(
-row["obslon"], row["obslat"]
).get_itrs(obstime=row["obstime"])
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_topo = SpectralCoord(
545 * u.nm, observer=observer, target=row["target"]
)
# FIXME: A warning is emitted for dates after MJD=57754.0 even
# though the leap second table should be valid until the end of
# 2020.
with nullcontext() if row["obstime"].mjd < 57754 else pytest.warns(
AstropyWarning, match="Tried to get polar motions"
):
sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame)
delta_vel = sc_topo.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
) - sc_final.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
)
if specsys == "galactoc":
assert_allclose(
delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30
)
else:
assert_allclose(
delta_vel.to_value(u.km / u.s),
row[specsys.lower()],
atol=0.02,
rtol=0.002,
)
# TODO: add test when target is not ICRS
# TODO: add test when SpectralCoord is in velocity to start with
|
39f8283a3a287c5b386562d0e16f91398205e956f0448160df89166171c5528b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates.matrix_utilities import (
angle_axis,
is_O3,
is_rotation,
matrix_product,
rotation_matrix,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_rotation_matrix():
assert_allclose(rotation_matrix(0 * u.deg, "x"), np.eye(3))
assert_allclose(
rotation_matrix(90 * u.deg, "y"), [[0, 0, -1], [0, 1, 0], [1, 0, 0]], atol=1e-12
)
assert_allclose(
rotation_matrix(-90 * u.deg, "z"),
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
atol=1e-12,
)
assert_allclose(
rotation_matrix(45 * u.deg, "x"), rotation_matrix(45 * u.deg, [1, 0, 0])
)
assert_allclose(
rotation_matrix(125 * u.deg, "y"), rotation_matrix(125 * u.deg, [0, 1, 0])
)
assert_allclose(
rotation_matrix(-30 * u.deg, "z"), rotation_matrix(-30 * u.deg, [0, 0, 1])
)
assert_allclose(
np.dot(rotation_matrix(180 * u.deg, [1, 1, 0]), [1, 0, 0]),
[0, 1, 0],
atol=1e-12,
)
# make sure it also works for very small angles
assert_allclose(
rotation_matrix(0.000001 * u.deg, "x"),
rotation_matrix(0.000001 * u.deg, [1, 0, 0]),
)
def test_angle_axis():
m1 = rotation_matrix(35 * u.deg, "x")
an1, ax1 = angle_axis(m1)
assert an1 - 35 * u.deg < 1e-10 * u.deg
assert_allclose(ax1, [1, 0, 0])
m2 = rotation_matrix(-89 * u.deg, [1, 1, 0])
an2, ax2 = angle_axis(m2)
assert an2 - 89 * u.deg < 1e-10 * u.deg
assert_allclose(ax2, [-(2**-0.5), -(2**-0.5), 0])
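# Illustrative cross-check of the sign convention tested above (an added
# sketch, assumed safe to evaluate at import time): a rotation by -89 deg about
# [1, 1, 0] is the same matrix as a rotation by +89 deg about the flipped axis,
# which is why angle_axis reports a positive angle with a negated, normalized
# axis.
assert_allclose(
    rotation_matrix(-89 * u.deg, [1, 1, 0]),
    rotation_matrix(89 * u.deg, [-1, -1, 0]),
    atol=1e-12,
)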
def test_is_O3():
"""Test the matrix checker ``is_O3``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_O3(m1)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_O3(n1)) == (True, True) # (show the broadcasting)
# Test atol parameter
nn1 = np.tile(0.5 * m1, (2, 1, 1))
assert tuple(is_O3(nn1)) == (False, False) # (show the broadcasting)
assert tuple(is_O3(nn1, atol=1)) == (True, True) # (show the broadcasting)
# reflection
m2 = m1.copy()
m2[0, 0] *= -1
assert is_O3(m2)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_O3(n2)) == (True, True) # (show the broadcasting)
# Not any sort of O(3)
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_O3(m3)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_O3(n3)) == (True, False) # (show the broadcasting)
def test_is_rotation():
"""Test the rotation matrix checker ``is_rotation``."""
# Normal rotation matrix
m1 = rotation_matrix(35 * u.deg, "x")
assert is_rotation(m1)
assert is_rotation(m1, allow_improper=True) # (a less restrictive test)
# and (M, 3, 3)
n1 = np.tile(m1, (2, 1, 1))
assert tuple(is_rotation(n1)) == (True, True) # (show the broadcasting)
# Test atol parameter
nn1 = np.tile(0.5 * m1, (2, 1, 1))
assert tuple(is_rotation(nn1)) == (False, False) # (show the broadcasting)
assert tuple(is_rotation(nn1, atol=10)) == (True, True) # (show the broadcasting)
# Improper rotation (unit rotation + reflection)
m2 = np.identity(3)
m2[0, 0] = -1
assert not is_rotation(m2)
assert is_rotation(m2, allow_improper=True)
# and (M, 3, 3)
n2 = np.stack((m1, m2))
assert tuple(is_rotation(n2)) == (True, False) # (show the broadcasting)
# Not any sort of rotation
m3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert not is_rotation(m3)
assert not is_rotation(m3, allow_improper=True)
# and (M, 3, 3)
n3 = np.stack((m1, m3))
assert tuple(is_rotation(n3)) == (True, False) # (show the broadcasting)
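# Quick illustration of the O(3) vs rotation distinction exercised above (an
# added sketch, assumed safe to evaluate at import time; the helper name is
# arbitrary): a pure reflection is orthogonal, so it passes is_O3, but its
# determinant is -1, so it only counts as an "improper" rotation.
_reflection = np.diag([1.0, 1.0, -1.0])
assert is_O3(_reflection)
assert not is_rotation(_reflection)
assert is_rotation(_reflection, allow_improper=True)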
def test_matrix_product_deprecation():
with pytest.warns(AstropyDeprecationWarning, match=r"Use @ instead\.$"):
matrix_product(np.eye(2))
|
03201d37414ef4d62379063f62ed9edcc0d7cc3e762d945df2a0b3e36e9edea1 | import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import CIRS, GCRS, AltAz, EarthLocation, SkyCoord
from astropy.coordinates.erfa_astrom import (
ErfaAstrom,
ErfaAstromInterpolator,
erfa_astrom,
)
from astropy.time import Time
from astropy.utils.exceptions import AstropyWarning
def test_science_state():
assert erfa_astrom.get().__class__ is ErfaAstrom
res = 300 * u.s
with erfa_astrom.set(ErfaAstromInterpolator(res)):
assert isinstance(erfa_astrom.get(), ErfaAstromInterpolator)
assert erfa_astrom.get().mjd_resolution == res.to_value(u.day)
# context manager should have switched it back
assert erfa_astrom.get().__class__ is ErfaAstrom
# must be a subclass of BaseErfaAstrom
with pytest.raises(TypeError):
erfa_astrom.set("foo")
def test_warnings():
with pytest.warns(AstropyWarning):
with erfa_astrom.set(ErfaAstromInterpolator(9 * u.us)):
pass
def test_erfa_astrom():
    # It is hard to write a unit test that exercises only the astrom provider
    # without simply re-testing its implementation against itself, so instead
    # we test a coordinate conversion that uses it.
location = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
obstime = Time("2020-01-01T18:00") + np.linspace(0, 1, 100) * u.hour
altaz = AltAz(location=location, obstime=obstime)
coord = SkyCoord(ra=83.63308333, dec=22.0145, unit=u.deg)
# do the reference transformation, no interpolation
ref = coord.transform_to(altaz)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
interp_300s = coord.transform_to(altaz)
# make sure they are actually different
assert np.any(ref.separation(interp_300s) > 0.005 * u.microarcsecond)
# make sure the resolution is as good as we expect
assert np.all(ref.separation(interp_300s) < 1 * u.microarcsecond)
def test_interpolation_nd():
"""
Test that the interpolation also works for nd-arrays
"""
fact = EarthLocation(
lon=-17.891105 * u.deg,
lat=28.761584 * u.deg,
height=2200 * u.m,
)
interp_provider = ErfaAstromInterpolator(300 * u.s)
provider = ErfaAstrom()
for shape in [tuple(), (1,), (10,), (3, 2), (2, 10, 5), (4, 5, 3, 2)]:
# create obstimes of the desired shapes
delta_t = np.linspace(0, 12, np.prod(shape, dtype=int)) * u.hour
obstime = (Time("2020-01-01T18:00") + delta_t).reshape(shape)
altaz = AltAz(location=fact, obstime=obstime)
gcrs = GCRS(obstime=obstime)
cirs = CIRS(obstime=obstime)
for frame, tcode in zip([altaz, cirs, gcrs], ["apio", "apco", "apcs"]):
without_interp = getattr(provider, tcode)(frame)
assert without_interp.shape == shape
with_interp = getattr(interp_provider, tcode)(frame)
assert with_interp.shape == shape
def test_interpolation_broadcasting():
import astropy.units as u
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
from astropy.time import Time
    # 100 gridded locations on the sky
rep = golden_spiral_grid(100)
coord = SkyCoord(rep)
    # 30 times spanning 1 hour
times = Time("2020-01-01T20:00") + np.linspace(-0.5, 0.5, 30) * u.hour
lst1 = EarthLocation(
lon=-17.891498 * u.deg,
lat=28.761443 * u.deg,
height=2200 * u.m,
)
    # note the use of broadcasting so that 30 times are broadcast against 100 positions
aa_frame = AltAz(obstime=times[:, np.newaxis], location=lst1)
aa_coord = coord.transform_to(aa_frame)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
aa_coord_interp = coord.transform_to(aa_frame)
assert aa_coord.shape == aa_coord_interp.shape
assert np.all(aa_coord.separation(aa_coord_interp) < 1 * u.microarcsecond)
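# Shape bookkeeping for the broadcasting above (illustrative added check):
# reshaping the 30 obstimes to (30, 1) and broadcasting against the 100 sky
# positions yields a (30, 100) AltAz result.
assert (np.zeros((30, 1)) + np.zeros(100)).shape == (30, 100)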
|
409dbaf8009aa9c8797d37ed19534d74688cd39d2d12e1fc44ec863a9722989a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
DIFFERENTIAL_CLASSES,
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
PhysicsSphericalDifferential,
PhysicsSphericalRepresentation,
RadialDifferential,
RadialRepresentation,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose_quantity
from astropy.utils import isiterable
from astropy.utils.exceptions import DuplicateRepresentationWarning
# create matrices for use in testing ``.transform()`` methods
matrices = {
"rotation": rotation_matrix(-10, "z", u.deg),
"general": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
}
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def components_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= getattr(rep1, component) == getattr(rep2, component)
return result
def components_allclose(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= u.allclose(getattr(rep1, component), getattr(rep2, component))
return result
def representation_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_equal(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_equal(rep1, rep2)
def representation_equal_up_to_angular_type(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_allclose(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_allclose(rep1, rep2)
class TestRadialRepresentation:
def test_transform(self):
"""Test the ``transform`` method. Only multiplication matrices pass."""
rep = RadialRepresentation(distance=10 * u.kpc)
# a rotation matrix does not work
matrix = rotation_matrix(10 * u.deg)
with pytest.raises(ValueError, match="scaled identity matrix"):
rep.transform(matrix)
# only a scaled identity matrix
matrix = 3 * np.identity(3)
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
# let's also check with differentials
dif = RadialDifferential(d_distance=-3 * u.km / u.s)
rep = rep.with_differentials(dict(s=dif))
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
assert newrep.differentials["s"].d_distance == -9 * u.km / u.s
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == "spherical"
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(
lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True
)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(
Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)
)
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert s2.distance == 10.0 * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(
Longitude(-90, u.degree, wrap_angle=180 * u.degree),
Latitude(-45, u.degree),
Distance(1.0, u.Rsun),
)
assert s3.lon == -90.0 * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180 * u.degree
s = SphericalRepresentation(
Longitude180(-90, u.degree), Latitude(-45, u.degree), Distance(1.0, u.Rsun)
)
assert isinstance(s.lon, Longitude180)
assert s.lon == -90.0 * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc
)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1.0, 2.0]), u.degree)
lat = Latitude(np.float32([3.0, 4.0]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values["lon"].dtype == np.float32
assert s1._values["lat"].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc
)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(
lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc,
)
assert (
exc.value.args[0]
== "Input parameters lon, lat, and distance cannot be broadcast"
)
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(
lon=[200] * u.deg, lat=[0] * u.deg, distance=[0] * u.kpc, copy=False
)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(
lon=[200] * u.deg, lat=0 * u.deg, distance=0 * u.kpc, copy=False
)
# We should be able to modify the wrap angle of the longitude component even if other
# components need to be broadcasted
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=1.0 * u.kpc
)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.distance = 1.0 * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg, distance=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg, distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
def test_setitem(self):
s = SphericalRepresentation(
lon=np.arange(5) * u.deg, lat=-np.arange(5) * u.deg, distance=1 * u.kpc
)
s[:2] = SphericalRepresentation(
lon=10.0 * u.deg, lat=2.0 * u.deg, distance=5.0 * u.kpc
)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match="allow_negative"):
SphericalRepresentation(10 * u.deg, 20 * u.deg, -10 * u.m)
s1 = SphericalRepresentation(
10 * u.deg, 20 * u.deg, Distance(-10 * u.m, allow_negative=True)
)
assert s1.distance == -10.0 * u.m
def test_nan_distance(self):
"""This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1 * u.deg, 2 * u.deg, np.nan * u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match="got multiple values"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, lat=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = SphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(
PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
        # now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation, SphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
        # ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
        # the route through Cartesian does propagate the NaN
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == "unitspherical"
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
s3 = UnitSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(
lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg
)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
# TODO! representation transformations with differentials cannot
# (currently) be implemented due to a mismatch between the UnitSpherical
        # expected keys (e.g. "s") and those expected in the other class
# (here "s / m"). For more info, see PR #11467
# We leave the test code commented out for future use.
# diffs = UnitSphericalCosLatDifferential(4*u.mas/u.yr, 5*u.mas/u.yr,
# 6*u.km/u.s)
sph = UnitSphericalRepresentation(1 * u.deg, 2 * u.deg)
# , differentials={'s': diffs}
got = sph.represent_as(PhysicsSphericalRepresentation)
# , PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation
) # PhysicsSphericalDifferential
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(SphericalRepresentation)
# , SphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation
) # , SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(
lon=[1, 2] * u.deg, lat=[3, 4] * u.deg, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = UnitSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
# compare differentials. they should be unchanged (ds1).
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert not hasattr(ds2, "d_distance")
        # now with a non-rotation matrix
# note that the result will be a Spherical, not UnitSpherical
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == "physicsspherical"
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
assert s3.phi == 8.0 * u.hourangle
assert s3.theta == 5.0 * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(
Angle(8, u.hour), Angle(5, u.deg), Distance(10, u.kpc)
)
assert s2.phi == 8.0 * u.hourangle
assert s2.theta == 5.0 * u.deg
assert s2.r == 10.0 * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8.0 * u.hourangle)
assert_allclose_quantity(s2.theta, 5.0 * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=10 * u.kpc
)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters phi, theta, and r cannot be broadcast"
):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9, 10] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[10, 20] * u.kpc
)
with pytest.raises(AttributeError):
s1.phi = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.theta = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.r = 1.0 * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(
phi=np.arange(10) * u.deg, theta=np.arange(5, 15) * u.deg, r=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg, theta=2 * u.deg, r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = PhysicsSphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(SphericalRepresentation, SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation(
[1.0, np.nan] * u.deg, [np.nan, 2.0] * u.deg, [3.0, np.nan] * u.m
)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
        # now with a non-rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
        # ds3 does, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
# through Cartesian does
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == "cartesian"
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc
)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
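        # A single (3, ...) array may also be passed; xyz_axis selects which
        # axis holds the x, y, z components.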
r = np.arange(27.0).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc, z=[3, 4, 5] * u.pc, xyz_axis=0
)
assert "xyz_axis should only be set" in str(exc.value)
def test_init_one_array_yz_fail(self):
with pytest.raises(
ValueError,
match="x, y, and z are required to instantiate CartesianRepresentation",
):
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
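        # built from a single array with copy=False, so .xyz can be a view of it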
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s1.x[0] == 0.0
assert s1_xyz.value[0, 0] == 0.0
# Not possible: we don't check that tuples are from the same array
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s2.x[0] == 0.0
assert s2_xyz.value[0, 0] == 1.0
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
s3 = CartesianRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc
)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.y = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(
x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s
)
banana = u.def_unit("banana")
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m, y=-2 * u.m, z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
ds1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["general"])
ds2 = s2.differentials["s"]
dexpected = CartesianDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["general"]), base=s2
)
assert_allclose_quantity(ds2.d_x, dexpected.d_x)
assert_allclose_quantity(ds2.d_y, dexpected.d_y)
assert_allclose_quantity(ds2.d_z, dexpected.d_z)
# also explicitly calculate, since we can
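        # (the hand-computed expectations below imply that matrices["general"]
        # is [[1, 2, 3], [4, 5, 6], [7, 8, 9]], applied as matrix @ xyz)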
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert_allclose(ds2.d_x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(ds2.d_y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(ds2.d_z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
assert ds2.d_x.unit == u.km / u.s
assert ds2.d_y.unit == u.km / u.s
assert ds2.d_z.unit == u.km / u.s
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == "cylindrical"
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(
rho=[1, 2, 3] * u.pc, phi=[2, 3, 4] * u.deg, z=[3, 4, 5] * u.kpc
)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
s3 = CylindricalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc
)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters rho, phi, and z cannot be broadcast"
):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc
)
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=20 * u.deg, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
    def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(
rho=np.arange(10) * u.pc, phi=-np.arange(10) * u.deg, z=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc, phi=-2 * u.deg, z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CylindricalRepresentation(
phi=[1, 2] * u.deg, z=[3, 4] * u.pc, rho=[5, 6] * u.kpc
)
s2 = s1.transform(matrices["rotation"])
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.z, s1.z)
assert_allclose_quantity(s2.rho, s1.rho)
assert s2.phi.unit is u.rad
assert s2.z.unit is u.kpc
assert s2.rho.unit is u.kpc
        # now with a non-rotation matrix
s3 = s1.transform(matrices["general"])
expected = (s1.to_cartesian().transform(matrices["general"])).represent_as(
CylindricalRepresentation
)
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
def test_transform(self, matrix):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalCosLatDifferential(
d_lon_coslat=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrix)
ds2 = ds1.transform(matrix, s1, s2)
dexpected = UnitSphericalCosLatDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrix), base=s2
)
assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_setting_with_other():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s1[0] = SphericalRepresentation(0.0 * u.deg, 0.0 * u.deg, 1 * u.kpc)
assert_allclose_quantity(s1.x, [1.0, 2000.0] * u.kpc)
assert_allclose_quantity(s1.y, [0.0, 4.0] * u.pc)
assert_allclose_quantity(s1.z, [0.0, 6000.0] * u.pc)
with pytest.raises(ValueError, match="loss of information"):
s1[1] = UnitSphericalRepresentation(0.0 * u.deg, 10.0 * u.deg)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90.0 * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(
x=np.array([1.0, 2000.0]) * u.kpc,
y=np.array([3000.0, 4.0]) * u.pc,
z=np.array([5.0, 600.0]) * u.cm,
)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
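    # converting to the same class should return the very same object, and
    # conversions that merely add components should share memory with the originals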
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert (
repr(r1) == "<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n"
" (1., 2.5, 1.)>"
)
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == "<CartesianRepresentation (x, y, z) in kpc\n (1., 2., 3.)>"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert (
repr(r3) == "<CartesianRepresentation (x, y, z) in kpc\n"
" [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>"
)
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
repr(cr) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>"
)
# This was broken before.
assert (
repr(cr.T) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>"
)
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == "(1., 2.5, 1.) (deg, deg, kpc)"
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == "(1., 2., 3.) kpc"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert str(r3) == "[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc"
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
str(cr) == "[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m"
)
# This was broken before.
assert (
str(cr.T) == "[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m"
)
def test_subclass_representation():
from astropy.coordinates.builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
class ICRSWrap180(ICRS):
frame_specific_representation_info = (
ICRS._frame_specific_representation_info.copy()
)
frame_specific_representation_info[
SphericalWrap180Representation
] = frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
# Basically to check what we document works;
# see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logd": u.Dex}
def to_cartesian(self):
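            # logd is a u.Dex quantity; .physical recovers the linear distance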
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.0 * u.deg, 0.0 * u.deg, 1.0 * u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.0 * u.deg, lat=0.0 * u.deg, logd=1.0 * u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
assert_allclose_quantity(c.xyz, [0.0, 10.0, 0.0] * u.kpc, atol=1.0 * u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.0 * u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg)
with pytest.raises(TypeError):
LogDRepresentation(
0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), lon=1.0 * u.deg
)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), foo="bar")
# if we define it a second time, even the qualnames are the same,
# so we raise
with pytest.raises(ValueError):
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logr": u.Dex}
def test_duplicate_warning():
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
with pytest.warns(DuplicateRepresentationWarning):
class UnitSphericalRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude}
assert "unitspherical" in DUPLICATE_REPRESENTATIONS
assert "unitspherical" not in REPRESENTATION_CLASSES
assert (
"astropy.coordinates.representation.spherical.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
assert (
__name__ + ".test_duplicate_warning.<locals>.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": diff}
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"1 / s2": diff}
)
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(
x=1, y=2, z=3, differentials=diff, copy=False, unit=u.kpc
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials="garmonbozia"
)
# And that one can add it to another representation.
s1 = CartesianRepresentation(
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc),
differentials=diff,
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(
d_x=1 * u.km / u.s,
d_y=2 * u.km / u.s,
d_z=3 * u.km / u.s,
differentials=diff,
)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(
d_lon=1 * u.mas / u.yr, d_lat=2 * u.mas / u.yr, d_distance=3 * u.km / u.s
)
with pytest.raises(TypeError):
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km / u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km / u.s)
        # verify that the check against expected_unit rejects passing in
        # two different but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": d1, "yr": d2}
)
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km / u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials["s"] is diff
assert rep.xyz.shape == rep.differentials["s"].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == "cartesian"
assert not r2.differentials
r3 = SphericalRepresentation(r1)
assert r3.differentials
assert representation_equal(r3, r1)
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = "thing"
def test_represent_as(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == "spherical"
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(
SphericalRepresentation, SphericalCosLatDifferential
)
assert new_rep.get_name() == "spherical"
assert new_rep.differentials["s"].get_name() == "sphericalcoslat"
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(
SphericalRepresentation, {"s": SphericalCosLatDifferential}
)
assert new_rep.get_name() == "spherical"
assert new_rep.differentials["s"].get_name() == "sphericalcoslat"
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == "radial":
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `main`
continue
elif "geodetic" in name or "bodycentric" in name:
# TODO: spheroidal representations (geodetic or bodycentric)
# do not have differentials yet
continue
new_rep = rep1.represent_as(
REPRESENTATION_CLASSES[name], DIFFERENTIAL_CLASSES[name]
)
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials["s"].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as("name")
assert "use frame object" in str(excinfo.value)
@pytest.mark.parametrize(
"sph_diff,usph_diff",
[
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
],
)
def test_represent_as_unit_spherical_with_diff(self, sph_diff, usph_diff):
"""Test that differential angles are correctly reduced."""
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
sph = rep.represent_as(SphericalRepresentation, sph_diff)
usph = rep.represent_as(UnitSphericalRepresentation, usph_diff)
assert components_equal(usph, sph.represent_as(UnitSphericalRepresentation))
assert components_equal(
usph.differentials["s"], sph.differentials["s"].represent_as(usph_diff)
)
# Just to be sure components_equal and the represent_as work as advertised,
# a sanity check: d_lat is always defined and should be the same.
assert_array_equal(sph.differentials["s"].d_lat, usph.differentials["s"].d_lat)
def test_getitem(self):
d = CartesianDifferential(
d_x=np.arange(10) * u.m / u.s,
d_y=-np.arange(10) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km, differentials=d
)
s_slc = s[2:8:2]
s_dif = s_slc.differentials["s"]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m / u.s)
def test_setitem(self):
d = CartesianDifferential(
d_x=np.arange(5) * u.m / u.s,
d_y=-np.arange(5) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(5) * u.m, y=-np.arange(5) * u.m, z=3 * u.km, differentials=d
)
s[:2] = s[2]
assert_array_equal(s.x, [2, 2, 2, 3, 4] * u.m)
assert_array_equal(s.y, [-2, -2, -2, -3, -4] * u.m)
assert_array_equal(s.z, [3, 3, 3, 3, 3] * u.km)
assert_array_equal(s.differentials["s"].d_x, [2, 2, 2, 3, 4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_y, [-2, -2, -2, -3, -4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s2 = s.represent_as(SphericalRepresentation, SphericalDifferential)
s[0] = s2[3]
assert_allclose_quantity(s.x, [3, 2, 2, 3, 4] * u.m)
assert_allclose_quantity(s.y, [-3, -2, -2, -3, -4] * u.m)
assert_allclose_quantity(s.z, [3, 3, 3, 3, 3] * u.km)
assert_allclose_quantity(s.differentials["s"].d_x, [3, 2, 2, 3, 4] * u.m / u.s)
assert_allclose_quantity(
s.differentials["s"].d_y, [-3, -2, -2, -3, -4] * u.m / u.s
)
assert_allclose_quantity(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s3 = CartesianRepresentation(
s.xyz,
differentials={
"s": d,
"s2": CartesianDifferential(np.ones((3, 5)) * u.m / u.s**2),
},
)
with pytest.raises(ValueError, match="same differentials"):
s[0] = s3[2]
s4 = SphericalRepresentation(
0.0 * u.deg,
0.0 * u.deg,
1.0 * u.kpc,
differentials=RadialDifferential(10 * u.km / u.s),
)
with pytest.raises(ValueError, match="loss of information"):
s[0] = s4
def test_transform(self):
d1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
r1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=d1
)
r2 = r1.transform(matrices["general"])
d2 = r2.differentials["s"]
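        # the differentials transform with the same matrix as the positions:
        # these expectations are matrices["general"] @ (d_x, d_y, d_z) per point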
assert_allclose_quantity(d2.d_x, [22.0, 28] * u.km / u.s)
assert_allclose_quantity(d2.d_y, [49, 64] * u.km / u.s)
assert_allclose_quantity(d2.d_z, [76, 100.0] * u.km / u.s)
def test_with_differentials(self):
        # make sure with_differentials correctly creates a new copy with the same
# differential
cr = CartesianRepresentation([1, 2, 3] * u.kpc)
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials["s"] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([0.1, 0.2, 0.3] * u.m / u.s)
cr3 = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials["s"] != cr3.differentials["s"]
assert cr4.differentials["s"] == diff2
        # also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials["s"] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s**2)
r1 = CartesianRepresentation(
*np.random.random((3, 5)), unit=u.pc, differentials=d1
)
r2 = r1.with_differentials(d2)
assert r1.differentials["s"] is r2.differentials["s"]
assert "s2" not in r1.differentials
assert "s2" in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1 * u.deg, d_lon=2 * u.deg, d_distance=10 * u.m)
sr = SphericalRepresentation(
lat=1 * u.deg, lon=2 * u.deg, distance=10 * u.m, differentials=sd
)
cart = sr.to_cartesian()
assert cart.get_name() == "cartesian"
assert not cart.differentials
@pytest.fixture
def unitphysics():
"""
    This fixture provides a unit-norm physics-spherical representation class,
    temporarily registered as PhysicsSphericalRepresentation._unit_representation.
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, "_unit_representation"):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
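    # A minimal unit-norm analogue of PhysicsSphericalRepresentation,
    # carrying only the angles phi and theta.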
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = {"phi": Angle, "theta": Angle}
def __init__(self, *args, copy=True, **kwargs):
super().__init__(*args, copy=copy, **kwargs)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {self._theta.to(u.degree)}"
)
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
}
def scale_factors(self):
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi", sintheta, "theta", l}
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
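    # Temporarily register this class as the unit representation of
    # PhysicsSphericalRepresentation; the teardown below restores the previous state.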
PhysicsSphericalRepresentation._unit_representation = (
UnitPhysicsSphericalRepresentation
)
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
objkw = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert_allclose(asphys.theta, obj.theta)
assert_allclose_quantity(asphys.r, 1 * u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert_allclose_quantity(assph.lat, 80 * u.deg)
assert_allclose_quantity(assph.distance, 1 * u.dimensionless_unscaled)
with pytest.raises(TypeError, match="got multiple values"):
unitphysics(1 * u.deg, 2 * u.deg, theta=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
unitphysics(1 * u.deg, 2 * u.deg, parrot=10)
def test_distance_warning(recwarn):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1 * u.kpc)
with pytest.raises(ValueError) as excinfo:
SphericalRepresentation(1 * u.deg, 2 * u.deg, -1 * u.kpc)
assert "Distance must be >= 0" in str(excinfo.value)
# second check is because the "originating" ValueError says the above,
# while the representation one includes the below
assert "you must explicitly pass" in str(excinfo.value)
def test_dtype_preservation_in_indexing():
# Regression test for issue #8614 (fixed in #8876)
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]], dtype="f4")
cr = CartesianRepresentation(xyz, xyz_axis=-1, unit="km")
assert cr.xyz.dtype == xyz.dtype
cr0 = cr[0]
# This used to fail.
assert cr0.xyz.dtype == xyz.dtype
class TestInfo:
def setup_class(cls):
cls.rep = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 10 * u.pc)
cls.diff = SphericalDifferential(
[10, 20] * u.mas / u.yr, [30, 40] * u.mas / u.yr, [50, 60] * u.km / u.s
)
cls.rep_w_diff = SphericalRepresentation(cls.rep, differentials=cls.diff)
def test_info_unit(self):
assert self.rep.info.unit == "deg, deg, pc"
assert self.diff.info.unit == "mas / yr, mas / yr, km / s"
assert self.rep_w_diff.info.unit == "deg, deg, pc"
@pytest.mark.parametrize("item", ["rep", "diff", "rep_w_diff"])
def test_roundtrip(self, item):
rep_or_diff = getattr(self, item)
as_dict = rep_or_diff.info._represent_as_dict()
new = rep_or_diff.__class__.info._construct_from_dict(as_dict)
assert np.all(representation_equal(new, rep_or_diff))
@pytest.mark.parametrize(
"cls",
[
SphericalDifferential,
SphericalCosLatDifferential,
CylindricalDifferential,
PhysicsSphericalDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
],
)
def test_differential_norm_noncartesian(cls):
# The norm of a non-Cartesian differential without specifying `base` should error
rep = cls(0, 0, 0)
with pytest.raises(ValueError, match=r"`base` must be provided .* " + cls.__name__):
rep.norm()
def test_differential_norm_radial():
# Unlike most non-Cartesian differentials, the norm of a radial differential does not require `base`
rep = RadialDifferential(1 * u.km / u.s)
assert_allclose_quantity(rep.norm(), 1 * u.km / u.s)
2f4ca75e05317751d448c42b2fdf01ffc59abe837bc267f9c2cb244d0e5d0085
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
import os
import warnings
from importlib import metadata
import erfa
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CIRS,
GCRS,
HCRS,
ICRS,
ITRS,
TEME,
TETE,
AltAz,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
HADec,
HeliocentricMeanEcliptic,
PrecessedGeocentric,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_sun,
solar_system_ephemeris,
)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
get_location_gcrs,
tete_to_itrs_mat,
)
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates.solar_system import get_body
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", False) == "true"
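# True when running in a CI environment (the CI environment variable is "true");
# used below to ignore warnings from occasional IERS download failures on CI.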
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
usph = golden_spiral_grid(200)
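    # golden_spiral_grid returns a UnitSphericalRepresentation of points spread
    # roughly uniformly over the sphere (here, 200 of them).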
dist = np.linspace(0.0, 1, len(usph)) * u.pc
inod = ICRS(usph)
iwd = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS())
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time("J2005"))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
icrs_coords = [ICRS(usph), ICRS(usph.lon, usph.lat, distance=dist)]
gcrs_frames = [GCRS(), GCRS(obstime=Time("J2005"))]
@pytest.mark.parametrize("icoo", icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS())
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10 * u.deg)
# now check that the cirs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10 * u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10 * u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000.0, 0, 0] * u.km, obsgeovel=[1, 0, 0] * u.km / u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10 * u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS()) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize("gframe", gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10 * u.deg)
# and the distance should transform at least somehow
assert not allclose(
gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10 * u.pc
)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
altazframe = AltAz(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_cirs_to_hadec():
"""
Check the basic CIRS<->HADec transforms.
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
hadecframe = HADec(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(hadecframe).transform_to(cirs)
cirs3 = cirscart.transform_to(hadecframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_itrs_topo_to_altaz_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
altaz_frame1 = AltAz(obstime="J2000", location=loc)
altaz_frame2 = AltAz(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
altaz1 = icrs.transform_to(altaz_frame1)
# Refraction added
altaz2 = icrs.transform_to(altaz_frame2)
# Refraction removed
cirs = altaz2.transform_to(cirs_frame)
altaz3 = cirs.transform_to(altaz_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
altaz11 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz11.az - altaz1.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.alt - altaz1.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.distance - altaz1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = altaz11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
altaz22 = itrs.transform_to(altaz_frame2)
assert_allclose(altaz22.az - altaz2.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.alt - altaz2.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.distance - altaz2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = altaz22.transform_to(itrs_frame)
altaz33 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz33.az - altaz3.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.alt - altaz3.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.distance - altaz3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_itrs_topo_to_hadec_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
hadec_frame1 = HADec(obstime="J2000", location=loc)
hadec_frame2 = HADec(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
hadec1 = icrs.transform_to(hadec_frame1)
# Refraction added
hadec2 = icrs.transform_to(hadec_frame2)
# Refraction removed
cirs = hadec2.transform_to(cirs_frame)
hadec3 = cirs.transform_to(hadec_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
hadec11 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec11.ha - hadec1.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.dec - hadec1.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.distance - hadec1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = hadec11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
hadec22 = itrs.transform_to(hadec_frame2)
assert_allclose(hadec22.ha - hadec2.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.dec - hadec2.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.distance - hadec2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = hadec22.transform_to(itrs_frame)
hadec33 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec33.ha - hadec3.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.dec - hadec3.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.distance - hadec3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(ITRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation_type = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS()).transform_to(gcrsc)
assert_allclose(gcrsc.spherical.lon, gcrsc2.ra)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS geocentric transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000")
cirs6 = CIRS(usph, obstime="J2006")
cirs2 = cirs.transform_to(ITRS()).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS()).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_cirs_itrs_topo():
"""
Check basic CIRS<->ITRS topocentric transforms for round-tripping.
"""
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000", location=loc)
cirs6 = CIRS(usph, obstime="J2006", location=loc)
cirs2 = cirs.transform_to(ITRS(location=loc)).transform_to(cirs)
# different obstime
cirs6_2 = cirs6.transform_to(ITRS(location=loc)).transform_to(cirs)
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(CIRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = (
gcrs.transform_to(ITRS())
.transform_to(CIRS())
.transform_to(ITRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = (
gcrs.transform_to(ICRS())
.transform_to(CIRS())
.transform_to(ICRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000")[None] # broadcast with times below
    # check with an array of times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
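    # `times` has shape (51, 1); together with `gcrs` of shape (1, 128) above, the
    # transforms below broadcast to shape (51, 128).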
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_gcrs_hadec():
"""
Check GCRS<->HADec transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000") # broadcast with times below
    # check with an array of times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
hdframe = HADec(obstime=times, location=loc)
hd1 = gcrs.transform_to(hdframe)
hd2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(hdframe)
hd3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(hdframe)
# make sure they're all consistent
assert_allclose(hd1.dec, hd2.dec)
assert_allclose(hd1.ha, hd2.ha)
assert_allclose(hd1.dec, hd3.dec)
assert_allclose(hd1.ha, hd3.ha)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time("J2000").jd
gcrs_coo = GCRS(180 * u.deg, 2 * u.deg, distance=10000 * u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric())
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10 * u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10 * u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox="B1850"))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5 * u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5 * u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
def test_precessed_geocentric_different_obstime():
# Create two PrecessedGeocentric frames with different obstime
precessedgeo1 = PrecessedGeocentric(obstime="2021-09-07")
precessedgeo2 = PrecessedGeocentric(obstime="2021-06-07")
# GCRS->PrecessedGeocentric should give different results for the two frames
gcrs_coord = GCRS(10 * u.deg, 20 * u.deg, 3 * u.AU, obstime=precessedgeo1.obstime)
pg_coord1 = gcrs_coord.transform_to(precessedgeo1)
pg_coord2 = gcrs_coord.transform_to(precessedgeo2)
assert not pg_coord1.is_equivalent_frame(pg_coord2)
assert not allclose(pg_coord1.cartesian.xyz, pg_coord2.cartesian.xyz)
# Looping back to GCRS should return the original coordinate
loopback1 = pg_coord1.transform_to(gcrs_coord)
loopback2 = pg_coord2.transform_to(gcrs_coord)
assert loopback1.is_equivalent_frame(gcrs_coord)
assert loopback2.is_equivalent_frame(gcrs_coord)
assert_allclose(loopback1.cartesian.xyz, gcrs_coord.cartesian.xyz)
assert_allclose(loopback2.cartesian.xyz, gcrs_coord.cartesian.xyz)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [
# J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(-90 * u.deg, 65 * u.deg), obstime=Time("J2000")),
AltAz(location=EarthLocation(120 * u.deg, -35 * u.deg), obstime=Time("J2000")),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-08-01 08:00:00"),
),
AltAz(
location=EarthLocation(120 * u.deg, -35 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
]
MOONDIST = 385000 * u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
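# Each Cartesian component above is MOONDIST / sqrt(3), so the norm of
# MOONDIST_CART equals MOONDIST.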
# roughly earth orbital eccentricity, but with an added tolerance
EARTHECC = 0.017 + 0.005
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == "gcrs"
# the .to(u.au) is not necessary, it just makes the asserts on failure more readable
assert (EARTHECC - 1) * u.au < sun.distance.to(u.au) < (EARTHECC + 1) * u.au
sunaa = sun.transform_to(testframe)
assert (EARTHECC - 1) * u.au < sunaa.distance.to(u.au) < (EARTHECC + 1) * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.au) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS()).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS()).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(
UnitSphericalRepresentation(10 * u.deg, 20 * u.deg), obstime=testframe.obstime
)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0 * u.deg, 0 * u.deg, distance=10 * u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < gcrs.distance.to(u.au) < (EARTHECC + 1) * u.au
cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
assert (EARTHECC - 1) * u.au < cirs.distance.to(u.au) < (EARTHECC + 1) * u.au
itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
assert (
(EARTHECC - 1) * u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1) * u.au
)
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(testframe.obstime, "tdb"))
earth_icrs_xyz = earth_pv_bary[0] * u.au
moonoffset = [0, 0, MOONDIST.value] * MOONDIST.unit
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000 * u.km < np.abs(moonaa.distance - MOONDIST).to(u.au) < 7000 * u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216] * u.m
obsgeovel = [4.59798494, -407.84677071, 0.0] * u.m / u.s
moon_lapalma = SkyCoord(
GCRS(
318.7048445 * u.deg,
-11.98761996 * u.deg,
369722.8231031 * u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel,
)
)
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
assert_allclose(delta, 0.0 * u.m, atol=1 * u.m)
def test_teme_itrf():
"""
Test case transform from TEME to ITRF.
    Test case derives from the example in Appendix C of Vallado, Crawford, Hujsak & Kelso (2006).
See https://celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf
"""
v_itrf = CartesianDifferential(
-3.225636520, -2.872451450, 5.531924446, unit=u.km / u.s
)
p_itrf = CartesianRepresentation(
-1033.479383,
7901.2952740,
6380.35659580,
unit=u.km,
differentials={"s": v_itrf},
)
t = Time("2004-04-06T07:51:28.386")
teme = ITRS(p_itrf, obstime=t).transform_to(TEME(obstime=t))
v_teme = CartesianDifferential(
-4.746131487, 0.785818041, 5.531931288, unit=u.km / u.s
)
p_teme = CartesianRepresentation(
5094.18016210,
6127.64465050,
6380.34453270,
unit=u.km,
differentials={"s": v_teme},
)
assert_allclose(
teme.cartesian.without_differentials().xyz,
p_teme.without_differentials().xyz,
atol=30 * u.cm,
)
assert_allclose(
teme.cartesian.differentials["s"].d_xyz,
p_teme.differentials["s"].d_xyz,
atol=1.0 * u.cm / u.s,
)
# test round trip
itrf = teme.transform_to(ITRS(obstime=t))
assert_allclose(
itrf.cartesian.without_differentials().xyz,
p_itrf.without_differentials().xyz,
atol=100 * u.cm,
)
assert_allclose(
itrf.cartesian.differentials["s"].d_xyz,
p_itrf.differentials["s"].d_xyz,
atol=1 * u.cm / u.s,
)
def test_precessedgeocentric_loopback():
from_coo = PrecessedGeocentric(
1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-01-01", equinox="2001-01-01"
)
# Change just the obstime
to_frame = PrecessedGeocentric(obstime="2001-06-30", equinox="2001-01-01")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert not allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
# Change just the equinox
to_frame = PrecessedGeocentric(obstime="2001-01-01", equinox="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the direction but not the distance
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
def test_teme_loopback():
from_coo = TEME(1 * u.AU, 2 * u.AU, 3 * u.AU, obstime="2001-01-01")
to_frame = TEME(obstime="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
@pytest.mark.remote_data
def test_earth_orientation_table(monkeypatch):
"""Check that we can set the IERS table used as Earth Reference.
Use the here and now to be sure we get a difference.
"""
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
t = Time.now()
location = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
altaz = AltAz(location=location, obstime=t)
sc = SkyCoord(1 * u.deg, 2 * u.deg)
# Default: uses IERS_Auto, which will give a prediction.
# Note: tests run with warnings turned into errors, so it is
# meaningful if this passes.
if CI:
with warnings.catch_warnings():
# Server occasionally blocks IERS download in CI.
warnings.filterwarnings("ignore", message=r".*using local IERS-B.*")
# This also captures unclosed socket warning that is ignored in setup.cfg
warnings.filterwarnings("ignore", message=r".*unclosed.*")
altaz_auto = sc.transform_to(altaz)
else:
altaz_auto = sc.transform_to(altaz) # No warnings
with iers.earth_orientation_table.set(iers.IERS_B.open()):
with pytest.warns(AstropyWarning, match="after IERS data"):
altaz_b = sc.transform_to(altaz)
sep_b_auto = altaz_b.separation(altaz_auto)
assert_allclose(sep_b_auto, 0.0 * u.deg, atol=1 * u.arcsec)
assert sep_b_auto > 10 * u.microarcsecond
# Check we returned to regular IERS system.
altaz_auto2 = sc.transform_to(altaz)
assert_allclose(altaz_auto2.separation(altaz_auto), 0 * u.deg)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricMeanEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set("jpl"):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0 * u.deg, atol=10 * u.mas)
assert all(
sep > 10 * u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)
)
# CIRS should be the same
assert_allclose(sep_cirs, 0.0 * u.deg, atol=1 * u.microarcsecond)
def test_tete_transforms():
"""
We test the TETE transforms for proper behaviour here.
The TETE transforms are tested for accuracy against JPL Horizons in
test_solar_system.py. Here we are looking to check for consistency and
errors in the self transform.
"""
loc = EarthLocation.from_geodetic("-22°57'35.1", "-67°47'14.1", 5186 * u.m)
time = Time("2020-04-06T00:00")
p, v = loc.get_gcrs_posvel(time)
gcrs_frame = GCRS(obstime=time, obsgeoloc=p, obsgeovel=v)
moon = SkyCoord(
169.24113968 * u.deg,
10.86086666 * u.deg,
358549.25381755 * u.km,
frame=gcrs_frame,
)
tete_frame = TETE(obstime=time, location=loc)
    # need to set obsgeoloc/vel explicitly, otherwise SkyCoord behaviour over-writes them
tete_geo = TETE(obstime=time, location=EarthLocation(*([0, 0, 0] * u.km)))
# test self-transform by comparing to GCRS-TETE-ITRS-TETE route
tete_coo1 = moon.transform_to(tete_frame)
tete_coo2 = moon.transform_to(tete_geo)
assert_allclose(tete_coo1.separation_3d(tete_coo2), 0 * u.mm, atol=1 * u.mm)
# test TETE-ITRS transform by comparing GCRS-CIRS-ITRS to GCRS-TETE-ITRS
itrs1 = moon.transform_to(CIRS()).transform_to(ITRS())
itrs2 = moon.transform_to(TETE()).transform_to(ITRS())
assert_allclose(itrs1.separation_3d(itrs2), 0 * u.mm, atol=1 * u.mm)
# test round trip GCRS->TETE->GCRS
new_moon = moon.transform_to(TETE()).transform_to(moon)
assert_allclose(new_moon.separation_3d(moon), 0 * u.mm, atol=1 * u.mm)
# test round trip via ITRS
tete_rt = tete_coo1.transform_to(ITRS(obstime=time)).transform_to(tete_coo1)
assert_allclose(tete_rt.separation_3d(tete_coo1), 0 * u.mm, atol=1 * u.mm)
def test_straight_overhead():
"""
With a precise CIRS<->Observed transformation this should give Alt=90 exactly
    If the CIRS self-transform breaks, it will not be exact, due to improper treatment of aberration
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
# Note, this won't be overhead for a topocentric observer because of
# aberration.
cirs_geo = obj.get_itrs(t).transform_to(CIRS(obstime=t))
# now get the Geocentric CIRS position of observatory
obsrepr = home.get_itrs(t).transform_to(CIRS(obstime=t)).cartesian
# topocentric CIRS position of a straight overhead object
cirs_repr = cirs_geo.cartesian - obsrepr
# create a CIRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
topocentric_cirs_frame = CIRS(obstime=t, location=home)
cirs_topo = topocentric_cirs_frame.realize_frame(cirs_repr)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = cirs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = cirs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def test_itrs_straight_overhead():
"""
With a precise ITRS<->Observed transformation this should give Alt=90 exactly
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = obj.get_itrs(t, location=home).transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = obj.get_itrs(t, location=home).transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def jplephem_ge(minversion):
"""Check if jplephem is installed and has version >= minversion."""
# This is a separate routine since somehow with pyinstaller the stanza
# not HAS_JPLEPHEM or metadata.version('jplephem') < '2.15'
# leads to a module not found error.
try:
return HAS_JPLEPHEM and metadata.version("jplephem") >= minversion
except Exception:
return False
@pytest.mark.remote_data
@pytest.mark.skipif(not jplephem_ge("2.15"), reason="requires jplephem >= 2.15")
def test_aa_hd_high_precision():
"""These tests are provided by @mkbrewer - see issue #10356.
    The code that produces them agrees very well (<0.5 mas) with Skyfield once polar motion
    is turned off, but Skyfield does not include polar motion, so a comparison to Skyfield
    or JPL Horizons will be ~1" off.
The absence of polar motion within Skyfield and the disagreement between Skyfield and Horizons
make high precision comparisons to those codes difficult.
Updated 2020-11-29, after the comparison between codes became even better,
down to 100 nas.
Updated 2023-02-14, after IERS changes the IERS B format and analysis,
causing small deviations.
NOTE: the agreement reflects consistency in approach between two codes,
not necessarily absolute precision. If this test starts failing, the
tolerance can and should be weakened *if* it is clear that the change is
due to an improvement (e.g., a new IAU precession model).
"""
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
# Note: at this level of precision for the comparison, we have to include
# the location in the time, as it influences the transformation to TDB.
t = Time("2017-04-06T00:00:00.0", location=loc)
with solar_system_ephemeris.set("de430"):
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
moon_hd = moon.transform_to(HADec(obstime=t, location=loc))
# Numbers from
# https://github.com/astropy/astropy/pull/11073#issuecomment-735486271
# updated in https://github.com/astropy/astropy/issues/11683
# and again after the IERS_B change.
TARGET_AZ, TARGET_EL = 15.032673662647138 * u.deg, 50.303110087520054 * u.deg
TARGET_DISTANCE = 376252.88325051306 * u.km
assert_allclose(moon_aa.az, TARGET_AZ, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.alt, TARGET_EL, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.distance, TARGET_DISTANCE, atol=0.1 * u.mm, rtol=0)
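    # erfa.ae2hd converts (azimuth, altitude) to (hour angle, declination) for the
    # given latitude, providing an independent check of the HADec transform.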
ha, dec = erfa.ae2hd(
moon_aa.az.to_value(u.radian),
moon_aa.alt.to_value(u.radian),
lat.to_value(u.radian),
)
ha = u.Quantity(ha, u.radian, copy=False)
dec = u.Quantity(dec, u.radian, copy=False)
assert_allclose(moon_hd.ha, ha, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_hd.dec, dec, atol=0.1 * u.uas, rtol=0)
def test_aa_high_precision_nodata():
"""
These tests are designed to ensure high precision alt-az transforms.
They are a slight fudge since the target values come from astropy itself. They are generated
    with a version of the code that passes the tests above, but using the internal solar system
    ephemerides to avoid the use of remote data.
"""
# Last updated when the new IERS B format and analysis was introduced.
TARGET_AZ, TARGET_EL = 15.0323151 * u.deg, 50.30271925 * u.deg
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
t = Time("2017-04-06T00:00:00.0")
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
assert_allclose(moon_aa.az - TARGET_AZ, 0 * u.mas, atol=0.5 * u.mas)
assert_allclose(moon_aa.alt - TARGET_EL, 0 * u.mas, atol=0.5 * u.mas)
class TestGetLocationGCRS:
# TETE and CIRS use get_location_gcrs to get obsgeoloc and obsgeovel
# with knowledge of some of the matrices. Check that this is consistent
# with a direct transformation.
def setup_class(cls):
cls.loc = loc = EarthLocation.from_geodetic(
np.linspace(0, 360, 6) * u.deg, np.linspace(-90, 90, 6) * u.deg, 100 * u.m
)
cls.obstime = obstime = Time(np.linspace(2000, 2010, 6), format="jyear")
# Get comparison via a full transformation. We do not use any methods
# of EarthLocation, since those depend on the fast transform.
loc_itrs = ITRS(loc.x, loc.y, loc.z, obstime=obstime)
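        # Give the site zero velocity in the rotating ITRS frame; the ITRS->GCRS
        # transform then yields the site's velocity in GCRS (essentially Earth rotation).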
zeros = np.broadcast_to(0.0 * (u.km / u.s), (3,) + loc_itrs.shape, subok=True)
loc_itrs.data.differentials["s"] = CartesianDifferential(zeros)
loc_gcrs_cart = loc_itrs.transform_to(GCRS(obstime=obstime)).cartesian
cls.obsgeoloc = loc_gcrs_cart.without_differentials()
cls.obsgeovel = loc_gcrs_cart.differentials["s"].to_cartesian()
def check_obsgeo(self, obsgeoloc, obsgeovel):
assert_allclose(obsgeoloc.xyz, self.obsgeoloc.xyz, atol=0.1 * u.um, rtol=0.0)
assert_allclose(
obsgeovel.xyz, self.obsgeovel.xyz, atol=0.1 * u.mm / u.s, rtol=0.0
)
def test_get_gcrs_posvel(self):
# Really just a sanity check
self.check_obsgeo(*self.loc.get_gcrs_posvel(self.obstime))
def test_tete_quick(self):
# Following copied from intermediate_rotation_transforms.gcrs_to_tete
rbpn = erfa.pnm06a(*get_jd12(self.obstime, "tt"))
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, tete_to_itrs_mat(self.obstime, rbpn=rbpn), rbpn
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
def test_cirs_quick(self):
cirs_frame = CIRS(location=self.loc, obstime=self.obstime)
# Following copied from intermediate_rotation_transforms.gcrs_to_cirs
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, cirs_to_itrs_mat(cirs_frame.obstime), pmat
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
|
c221e3a448c5177f271dc4128777d7973f84837c94b8662fe6f3e55bdd58285a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pytest
from erfa import ErfaWarning
from astropy import units as u
from astropy.coordinates import (
FK4,
FK5,
GCRS,
ICRS,
AltAz,
Angle,
Attribute,
BaseCoordinateFrame,
CartesianRepresentation,
EarthLocation,
Galactic,
Latitude,
RepresentationMapping,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
frame_transform_graph,
)
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.coordinates.transformations import FunctionTransform
from astropy.io import fits
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils import isiterable
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.wcs import WCS
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time("J2001")
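# Convenience wrapper: quantity allclose with a default absolute tolerance of
# 1e-8 in the unit of the first argument.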
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1.0)
return quantity_allclose(a, b, rtol, atol)
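# Snapshot and restore the representation registries around every test so that
# tests which register custom representation classes do not leak state into later tests.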
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
    strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90 * u.deg, -11 * u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (
FK5(),
FK5(equinox=Time("J1975.0")),
FK4(),
FK4(equinox=Time("J1975.0")),
SkyCoord(RA, DEC, frame="fk4", equinox="J1980"),
):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame="icrs")
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
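# The nested loops below build the full cartesian product of the two frames,
# equinoxes, and obstimes: 4 * 4 * 2 * 2 * 2 * 2 = 256 parameter sets.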
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, "J1975.0"):
for obstime0 in (None, "J1980.0"):
for equinox1 in (None, "J1975.0"):
for obstime1 in (None, "J1980.0"):
rt_sets.append(
(
rt_frame0,
rt_frame1,
equinox0,
equinox1,
obstime0,
obstime1,
)
)
rt_args = ("frame0", "frame1", "equinox0", "equinox1", "obstime0", "obstime1")
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {"equinox": equinox0, "obstime": obstime0}
attrs1 = {"equinox": equinox1, "obstime": obstime1}
# Remove None values
attrs0 = {k: v for k, v in attrs0.items() if v is not None}
attrs1 = {k: v for k, v in attrs1.items() if v is not None}
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = {
attr: val for attr, val in attrs1.items() if attr in frame1.frame_attributes
}
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = {
attr: val for attr, val in attrs0.items() if attr in frame0.frame_attributes
}
# also, if any are None, fill in with defaults
for attrnm in frame0.frame_attributes:
if attrs0.get(attrnm, None) is None:
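            # Frames like FK4 default obstime to their equinox, so mirror that
            # behaviour here when obstime was not given explicitly.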
if attrnm == "obstime" and frame0.get_frame_attr_defaults()[attrnm] is None:
if "equinox" in attrs0:
attrs0[attrnm] = attrs0["equinox"]
else:
attrs0[attrnm] = frame0.get_frame_attr_defaults()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
    String input coordinates in a variety of formats.
"""
sc = SkyCoord("1d 2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1d", "2d")
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord("1°2′3″", "2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
sc = SkyCoord("1°2′3″ 2°3′4″")
assert allclose(sc.ra, Angle("1°2′3″"))
assert allclose(sc.dec, Angle("2°3′4″"))
with pytest.raises(ValueError) as err:
SkyCoord("1d 2d 3d")
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord("8 00 00 +5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord("8h00m00s+5d00m00.0s", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc11, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc2 = SkyCoord("8 00 -5 00 00.0", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord("8 00 -5 00.6", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord("J080000.00-050036.00", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord("J080000+050036", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="icrs")
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord("8h00.6m -5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6m-5d00.6m", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord("8h00.6-5d00.6", unit=(u.hour, u.deg), frame="fk4")
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord("8 00 -5 00.6", unit=(u.deg, u.deg), frame="galactic")
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in (
"deg",
"deg,deg",
" deg , deg ",
u.deg,
(u.deg, u.deg),
np.array(["deg", "deg"]),
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in (
"hourangle",
"hourangle,hourangle",
" hourangle , hourangle ",
u.hourangle,
[u.hourangle, u.hourangle],
):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ("hourangle,deg", (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ("deg,deg,deg,deg", [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert "Unit keyword must have one to three unit values" in str(err.value)
for unit in ("m", (u.m, u.deg), ""):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
    Input coordinates given as a list of mixed strings, tuples, and Quantities.
"""
sc = SkyCoord(
[("1d", "2d"), (1 * u.deg, 2 * u.deg), "1d 2d", ("1°", "2°"), "1° 2°"],
unit="deg",
)
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(ValueError) as err:
SkyCoord(["1d 2d 3d"])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([("1d", "2d", "3d")])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle("1d"))
assert allclose(sc.dec, Angle("2d"))
with pytest.raises(
ValueError,
match="One or more elements of input sequence does not have a length",
):
        SkyCoord([1 * u.deg, 2 * u.deg])  # each element is expected to be an (ra, dec) pair
def test_coord_init_array():
"""
    Input in the form of a list, nested list, or numpy array.
"""
for a in (["1 2", "3 4"], [["1", "2"], ["3", "4"]], [[1, 2], [3, 4]]):
sc = SkyCoord(a, unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit="deg")
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame="icrs")
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame="icrs", ra="1d")
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame="icrs")
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame="icrs")
assert sc.frame.name == "icrs"
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == "icrs"
sc = SkyCoord(sc)
assert sc.frame.name == "icrs"
sc = SkyCoord(C_ICRS)
assert sc.frame.name == "icrs"
SkyCoord(C_ICRS, frame="icrs")
assert sc.frame.name == "icrs"
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame="galactic")
assert "Cannot override frame=" in str(err.value)
def test_equal():
obstime = "B1955"
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20] * u.deg, [3, 4] * u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 2] * u.km / u.s)
sc2 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, radial_velocity=[1, 20] * u.km / u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert isinstance(v := (sc1[0] == sc2[0]), (bool, np.bool_)) and v
assert isinstance(v := (sc1[0] != sc2[0]), (bool, np.bool_)) and not v
def test_equal_different_type():
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955")
# Test equals and not equals operators against different types
assert sc1 != "a string"
assert not (sc1 == "a string")
def test_equal_exceptions():
sc1 = SkyCoord(1 * u.deg, 2 * u.deg, obstime="B1955")
sc2 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(
ValueError,
match=(
"cannot compare: extra frame attribute 'obstime' is not equivalent"
r" \(perhaps compare the frames directly to avoid this exception\)"
),
):
sc1 == sc2 # noqa: B015
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize("frame", ["fk4", "fk5", "icrs"])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, obstime="B1955", frame=frame)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, obstime="B1955", frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time("B1955")
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
sc = SkyCoord(np.ones((2, 1)) * u.deg, np.ones((1, 3)) * u.deg)
sc[1, 1] = SkyCoord(0 * u.deg, 0 * u.deg)
expected = np.ones((2, 3)) * u.deg
expected[1, 1] = 0.0
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity."""
sc0 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
radial_velocity=[1, 2] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc2 = SkyCoord(
[10, 20] * u.deg,
[30, 40] * u.deg,
radial_velocity=[10, 20] * u.km / u.s,
obstime="B1950",
frame="fk4",
)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time("B1950")
assert sc1.frame.name == "fk4"
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = "B1955"
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg, frame="fk4")
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg, frame="fk4", obstime=obstime)
sc1 = SkyCoordSub(sc0)
with pytest.raises(
TypeError,
match="an only set from object of same class: SkyCoordSub vs. SkyCoord",
):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame="fk4", obstime="B2001")
with pytest.raises(
ValueError, match="can only set frame item from an equivalent frame"
):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame="fk4", obstime=obstime)
with pytest.raises(
TypeError, match="scalar 'FK4' frame object does not support item assignment"
):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord(
[1, 2] * u.deg,
[3, 4] * u.deg,
pm_ra_cosdec=[1, 2] * u.mas / u.yr,
pm_dec=[3, 4] * u.mas / u.yr,
)
sc2 = SkyCoord(
[10, 20] * u.deg, [30, 40] * u.deg, radial_velocity=[10, 20] * u.km / u.s
)
with pytest.raises(
TypeError,
match=(
"can only set from object of same class: "
"UnitSphericalCosLatDifferential vs. RadialDifferential"
),
):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
sc3 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]] * u.deg, [[50, 6], [70, 8]] * u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2] * u.deg, [3, 6, 4] * u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2] * u.deg, [30, 40, 3, 4] * u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20] * u.deg, [3, 4, 30, 40] * u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
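# Inserting the (2, 2) coordinate at index 1 acts along axis 0, giving shape (4, 2).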
assert skycoord_equal(
sc,
SkyCoord(
[[1, 2], [10, 2], [30, 4], [3, 4]] * u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]] * u.deg,
),
)
def test_insert_exceptions():
sc0 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc1 = SkyCoord(5 * u.deg, 6 * u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]] * u.deg, [[5, 6], [7, 8]] * u.deg)
with pytest.raises(TypeError, match="cannot insert into scalar"):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match="axis must be 0"):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match="obj arg must be an integer"):
sc0.insert(slice(None), sc0)
with pytest.raises(
IndexError, match="index -100 is out of bounds for axis 0 with size 2"
):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(
ValueError,
match=r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)",
):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox="J1999", obstime="J2100")
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox="J1999", obstime="J2001")
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox="J1999", obstime="J2002")
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame="icrs", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == "J1999" # Just the raw value (not validated)
assert sc.obstime == "J2001"
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999", obstime="J2001")
assert sc.equinox == Time("J1999") # Coming from the self.frame object
assert sc.obstime == Time("J2001")
sc = SkyCoord(1, 2, frame="fk4", unit="deg", equinox="J1999")
assert sc.equinox == Time("J1999")
assert sc.obstime == Time("J1999")
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = "1h2m3s 1d2m3s"
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap("15.5125 1.03417")
assert sc.to_string("dms") == wrap("15d30m45s 1d02m03s")
assert sc.to_string("hmsdms") == wrap("01h02m03s +01d02m03s")
with_kwargs = sc.to_string("hmsdms", precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap("+01h02m03.000s +01d02m03.000s")
@pytest.mark.parametrize("cls_other", [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg) / u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
assert repr(sc1) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
assert (
repr(sc2)
== "<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n (1., 1., 1.)>"
)
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame="icrs")
assert repr(sc3).startswith("<SkyCoord (ICRS): (ra, dec) in deg\n")
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == "<SkyCoord (ICRS): (ra, dec) in deg\n (0., 1.)>"
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame="icrs", distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time("2005-03-21 00:00:00")
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith(
"<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., -4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, obswl=1.0 micron):"
" (az, alt, distance) in (deg, deg, kpc)\n"
)
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame="icrs")
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame="icrs")
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame="icrs")
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to("fk5")
assert sc5.ra == sc2.transform_to("fk5").ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to("fk5")
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to("fk5").ra)
def test_position_angle():
c1 = SkyCoord(0 * u.deg, 0 * u.deg)
c2 = SkyCoord(1 * u.deg, 0 * u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0 * u.deg)
c3 = SkyCoord(1 * u.deg, 0.1 * u.deg)
assert c1.position_angle(c3) < 90 * u.deg
c4 = SkyCoord(0 * u.deg, 1 * u.deg)
assert_allclose(c1.position_angle(c4), 0 * u.deg)
carr1 = SkyCoord(0 * u.deg, [0, 1, 2] * u.deg)
carr2 = SkyCoord([-1, -2, -3] * u.deg, [0.1, 1.1, 2.1] * u.deg)
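# carr2 lies to the west and slightly north of carr1, so every position
# angle falls in the north-west quadrant (between 270 and 360 deg).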
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360 * u.degree)
assert np.all(res > 270 * u.degree)
cicrs = SkyCoord(0 * u.deg, 0 * u.deg, frame="icrs")
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10.0, 20.0, 10.0, 20.0)
assert result.unit is u.radian
assert result.value == 0.0
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5")
cfk5B1950 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950")
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
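# Measured from opposite ends of the same arc, the two position angles
# should point in opposite directions, i.e. differ by ~180 deg.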
assert 179 < (posang_forward - posang_backward).wrap_at(360 * u.deg).degree < 181
dcfk5 = SkyCoord(1 * u.deg, 0 * u.deg, frame="fk5", distance=1 * u.pc)
dcfk5B1950 = SkyCoord(
1 * u.deg, 0 * u.deg, frame="fk5", equinox="B1950", distance=1.0 * u.pc
)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
# Round-trip tests: where is sc2 from sc1?
# Use those offsets from sc1 and verify you get to sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
for sc1 in [
SkyCoord(0 * u.deg, -90 * u.deg), # South pole
SkyCoord(0 * u.deg, 90 * u.deg), # North pole
SkyCoord(1 * u.deg, 2 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="fk4",
),
SkyCoord(
np.linspace(359, 0, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="icrs",
),
SkyCoord(
np.linspace(-3, 3, npoints),
np.linspace(-90, 90, npoints),
unit=(u.rad, u.deg),
frame="barycentricmeanecliptic",
),
]:
for sc2 in [
SkyCoord(5 * u.deg, 10 * u.deg),
SkyCoord(
np.linspace(0, 359, npoints),
np.linspace(-90, 90, npoints),
unit=u.deg,
frame="galactic",
),
]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
# over the South pole a long way, to get to same spot
sc1 = SkyCoord(0 * u.deg, 89 * u.deg)
for posang, sep in [(0 * u.deg, 2 * u.deg), (180 * u.deg, 358 * u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2 * sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10 * u.deg, 47 * u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360 * u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
# Verify that a 90 degree posang, which means East
# corresponds to an increase in RA, by ~separation/cos(dec) and
# a slight convergence to equator
sc1 = SkyCoord(10 * u.deg, 60 * u.deg)
sc2 = sc1.directional_offset_by(90 * u.deg, 1.0 * u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
initializer is the intermediary that translates the table columns into
something coordinates understands.
(Regression test for #1762)
"""
from astropy.table import Column, Table
t = Table()
t.add_column(Column(data=[1, 2, 3], name="ra", unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name="dec", unit=u.deg))
c = SkyCoord(t["ra"], t["dec"])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
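# Each tuple is (repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3):
# the representation name, the units of the three components, the class the
# second component is wrapped in, and the frame component names to compare.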
base_unit_attr_sets = [
("spherical", u.karcsec, u.karcsec, u.kpc, Latitude, "l", "b", "distance"),
("unitspherical", u.karcsec, u.karcsec, None, Latitude, "l", "b", None),
("physicsspherical", u.karcsec, u.karcsec, u.kpc, Angle, "phi", "theta", "r"),
("cartesian", u.km, u.km, u.km, u.Quantity, "u", "v", "w"),
("cylindrical", u.km, u.karcsec, u.km, Angle, "rho", "phi", "z"),
]
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(
base_unit_attr_set + (representation, c1, c2, c3)
)
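# units_attr_sets crosses each base set with the representation given as a
# name or a class, the components as scalars or length-1 lists, and plain
# Python numbers vs. numpy arrays.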
units_attr_args = (
"repr_name",
"unit1",
"unit2",
"unit3",
"cls2",
"attr1",
"attr2",
"attr3",
"representation",
"c1",
"c2",
"c3",
)
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_skycoord_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1,
c2,
c3,
unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3}
sc = SkyCoord(
c1,
c2,
unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_skycoord_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(
c1, c2, unit=(unit1, unit2), frame=Galactic, representation_type=representation
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = SkyCoord(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(
frame=Galactic,
unit=(unit1, unit2),
representation_type=representation,
**kwargs,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
units_attr_args, [x for x in units_attr_sets if x[0] != "unitspherical"]
)
def test_galactic_three_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
1000 * c3 * u.Unit(unit3 / 1000),
representation_type=representation,
)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr3: c3 * unit3}
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2, attr3: c3 * unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(
sc, (c1 * unit1, c2 * unit2, c3 * unit3), (attr1, attr2, attr3)
)
@pytest.mark.parametrize(
units_attr_args,
[x for x in units_attr_sets if x[0] in ("spherical", "unitspherical")],
)
def test_galactic_spherical_two_components(
repr_name,
unit1,
unit2,
unit3,
cls2,
attr1,
attr2,
attr3,
representation,
c1,
c2,
c3,
):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(
1000 * c1 * u.Unit(unit1 / 1000),
cls2(c2, unit=unit2),
representation_type=representation,
)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
sc = Galactic(c1 * unit1, c2 * unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
kwargs = {attr1: c1 * unit1, attr2: c2 * unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1 * unit1, c2 * unit2), (attr1, attr2))
@pytest.mark.parametrize(
("repr_name", "unit1", "unit2", "unit3", "cls2", "attr1", "attr2", "attr3"),
[x for x in base_unit_attr_sets if x[0] != "unitspherical"],
)
def test_skycoord_coordinate_input(
repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3
):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord(
[(c1, c2, c3)],
unit=(unit1, unit2, unit3),
representation_type=repr_name,
frame="galactic",
)
assert_quantities_allclose(
sc, ([c1] * unit1, [c2] * unit2, [c3] * unit3), (attr1, attr2, attr3)
)
c1, c2, c3 = 1 * unit1, 2 * unit2, 3 * unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame="galactic")
assert_quantities_allclose(
sc, ([1] * unit1, [2] * unit2, [3] * unit3), (attr1, attr2, attr3)
)
def test_skycoord_string_coordinate_input():
sc = SkyCoord("01 02 03 +02 03 04", unit="deg", representation_type="unitspherical")
assert_quantities_allclose(
sc,
(Angle("01:02:03", unit="deg"), Angle("02:03:04", unit="deg")),
("ra", "dec"),
)
sc = SkyCoord(
["01 02 03 +02 03 04"], unit="deg", representation_type="unitspherical"
)
assert_quantities_allclose(
sc,
(Angle(["01:02:03"], unit="deg"), Angle(["02:03:04"], unit="deg")),
("ra", "dec"),
)
def test_units():
sc = SkyCoord(1, 2, 3, unit="m", representation_type="cartesian") # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
# All get u.m
sc = SkyCoord(1, 2 * u.km, 3, unit="m", representation_type="cartesian")
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type="cartesian") # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit="m, km, pc", representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type="cartesian")
assert "should have matching physical types" in str(err.value)
sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type="cartesian")
assert_quantities_allclose(sc, (1 * u.m, 2 * u.km, 3 * u.pc), ("x", "y", "z"))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type="spherical")
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(("mode", "origin"), [("wcs", 0), ("all", 0), ("all", 1)])
def test_wcs_methods(mode, origin):
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents(
"../../wcs/tests/data/maps/1904-66_TAN.hdr", encoding="binary"
)
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to("icrs")
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to(
"icrs"
)
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == "B1950.000"
assert c2.obstime.value == "B1950.000"
c2 = c.transform_to(FK4(equinox="J1975", obstime="J1980"))
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J2000.000"
assert c2.obstime.value == "J1980.000"
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox="J1975", obstime="J1980")
c2 = c.transform_to(FK5)
assert c2.equinox.value == "J1975.000"
assert c2.obstime.value == "J1980.000"
c2 = c.transform_to(FK5(equinox="J1990"))
assert c2.equinox.value == "J1990.000"
assert c2.obstime.value == "J1980.000"
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="B1950.000")
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == "B1950.000"
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == "J2000.000"
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord(
[1, 2] * u.m,
[2, 3] * u.m,
[3, 4] * u.m,
representation_type="cartesian",
frame="fk5",
obstime="J1999.9",
equinox="J1988.8",
)
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.0) * u.hourangle, np.arange(20.0, 30.0) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(
np.random.rand(20) * 360.0 * u.degree,
(np.random.rand(20) * 180.0 - 90.0) * u.degree,
)
sc2 = SkyCoord(
np.random.rand(100) * 360.0 * u.degree,
(np.random.rand(100) * 180.0 - 90.0) * u.degree,
)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20) * u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100) * u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10 * u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250 * u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"))
assert c1.equinox == Time("J2010")
# Frame instance with data (data gets ignored)
c2 = SkyCoord(
3 * u.deg, 4 * u.deg, frame=FK5(1.0 * u.deg, 2 * u.deg, equinox="J2010")
)
assert c2.equinox == Time("J2010")
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time("J2010")
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox="J2010"), equinox="J2001")
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Column, Table
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="RA[J2000]"))
tab.add_column(Column(data=np.random.rand(10), unit="deg", name="DEC[J2000]"))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc.dec.deg, tab["DEC[J2000]"])
# try without units in the table
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab["RA[J2000]"])
npt.assert_array_equal(sc2.dec.deg, tab["DEC[J2000]"])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name="RA_J1900"))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert "J1900" in excinfo.value.args[0] and "J2000" in excinfo.value.args[0]
tab.remove_column("RA_J1900")
tab["RA[J2000]"].unit = u.deg
tab["DEC[J2000]"].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_ra_cosdec"))
tab.add_column(Column(data=np.random.rand(10) * u.mas / u.yr, name="pm_dec"))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab["RA[J2000]"])
assert u.allclose(sc3.dec, tab["DEC[J2000]"])
assert u.allclose(sc3.pm_ra_cosdec, tab["pm_ra_cosdec"])
assert u.allclose(sc3.pm_dec, tab["pm_dec"])
# should fail if stuff doesn't have proper units
tab["RA[J2000]"].unit = None
tab["DEC[J2000]"].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column("pm_ra_cosdec")
tab.remove_column("pm_dec")
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab["RA[J2000]"], unit=u.deg)
oldra = tab["RA[J2000]"]
tab.remove_column("RA[J2000]")
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab["DEC[J2000]"])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=("x", "y", "z"))
tabgal = Table([b, l], names=("b", "l"))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type="cartesian")
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame="galactic")
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal["b"].name = "gal_b"
tabgal["l"].name = "gal_l"
SkyCoord.guess_from_table(tabgal, frame="galactic")
tabgal["gal_b"].name = "blob"
tabgal["gal_l"].name = "central"
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame="galactic")
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3] * u.deg, dec=[4, 5, 6] * u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3] * u.deg)
assert np.all(scnew.dec == [4, 6] * u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2 * u.deg, 5 * u.deg)
reprobj = UnitSphericalRepresentation(3 * u.deg, 6 * u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5")
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1 * u.deg, 4 * u.deg, frame="fk5", equinox="J2010")
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2 * u.deg, 5 * u.deg, frame="fk5", equinox="J2010")
scfk5_3_j2010 = SkyCoord(3 * u.deg, 6 * u.deg, frame="fk5", equinox="J2010")
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time("J2010")
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=("deg", "deg"))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == "1 1")
def test_equiv_skycoord():
sci1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
sci2 = SkyCoord(1 * u.deg, 3 * u.deg, frame="icrs")
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5")
scf2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", equinox="J2005")
# obstime is *not* an FK5 attribute, but we still want scf1 and scf3
# to come out different because they're part of SkyCoord
scf3 = SkyCoord(1 * u.deg, 2 * u.deg, frame="fk5", obstime="J2005")
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox="J2005"))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox="J2005"))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(
1 * u.deg, 2 * u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m)
)
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135 * u.deg, 65 * u.deg)
assert sc.get_constellation() == "Ursa Major"
assert sc.get_constellation(short_name=True) == "UMa"
scs = SkyCoord([135] * 2 * u.deg, [65] * 2 * u.deg)
npt.assert_equal(scs.get_constellation(), ["Ursa Major"] * 2)
npt.assert_equal(scs.get_constellation(short_name=True), ["UMa"] * 2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name("And I").get_constellation(short_name=True) == "And"
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name("And VI").get_constellation() == "Pegasus"
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name("And XXII").get_constellation() == "Pisces"
assert SkyCoord.from_name("And XXX").get_constellation() == "Cassiopeia"
# ok maybe not
# ok, but at least some of the others do make sense...
assert (
SkyCoord.from_name("Coma Cluster").get_constellation(short_name=True) == "Com"
)
assert SkyCoord.from_name("Orion Nebula").get_constellation() == "Orion"
assert SkyCoord.from_name("Triangulum Galaxy").get_constellation() == "Triangulum"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = "cartesian"
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="icrs")
fk5 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame="fk5")
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1 * u.deg, 1 * u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1 * u.deg)
assert_allclose(ddec, 1 * u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0] * 4 * u.arcmin, [0] * 4 * u.arcmin, frame="icrs")
i01s = SkyCoord([0] * 4 * u.arcmin, np.arange(4) * u.arcmin, frame="icrs")
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0 * u.arcmin)
assert_allclose(ddec, np.arange(4) * u.arcmin)
@pytest.mark.parametrize("frame", ["icrs", "galactic"])
@pytest.mark.parametrize(
"comparison_data",
[
(0 * u.arcmin, 1 * u.arcmin),
(1 * u.arcmin, 0 * u.arcmin),
(1 * u.arcmin, 1 * u.arcmin),
],
)
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0 * u.arcmin, 0 * u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10 * u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10 * u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.0 * u.deg, 40.0 * u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534 * u.deg, 2.2134 * u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10 * u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert "fakeattr" in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs")
assert "fakeattr" in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1 * u.deg, 2 * u.deg, frame="icrs", fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert "fakeattr" not in dir(sc_before)
assert "fakeattr" not in dir(sc_after1)
assert "fakeattr" not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1 * u.deg, 2 * u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
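# The cache now holds the original representation plus the in-frame-units
# copy created by repr(), hence length 2.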
assert len(i.cache["representation"]) == 2
i.cache.clear()
assert len(i.cache["representation"]) == 0
def test_set_attribute_exceptions():
"""Ensure no attribute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.0 * u.deg, 2.0 * u.deg, frame="fk5")
assert hasattr(sc.frame, "equinox")
with pytest.raises(AttributeError):
sc.equinox = "B1950"
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, "relative_humidity")
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ["2017-01-01T00:00", "2017-01-01T00:10"]
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, "obstime")
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to("fk4")
assert np.all(sc_fk4.frame.obstime == obstime)
# And transforming back should not lose it.
sc2 = sc_fk4.transform_to("icrs")
assert not hasattr(sc2.frame, "obstime")
assert np.all(sc2.obstime == obstime)
# Ensure obstime get taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0.0, 1.0], [2.0, 3.0], unit="deg", frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time("2000-01-01T00:00")
t2 = Time("2012-01-01T00:00")
# Check a very simple case first:
frame = ICRS(
ra=10.0 * u.deg,
dec=0 * u.deg,
distance=10.0 * u.pc,
pm_ra_cosdec=0.1 * u.deg / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101 * u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12 * u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
# there were 2 leap seconds between 2000 and 2010, so the difference in
# the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9 * u.second < adt.to(u.second) < 2.1 * u.second
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6 * u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5 * u.deg < applied3.ra - c1.ra < 0.7 * u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-3 * u.deg
)
# but *not* this close
assert not quantity_allclose(
applied1.ra - c1.ra, (applied3.ra - c1.ra) * 2, atol=1e-4 * u.deg
)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "lon", "recommended"),
RepresentationMapping("lat", "lat", "recommended"),
RepresentationMapping("distance", "radius", "recommended"),
]
}
SkyCoord(lat=1 * u.deg, lon=2 * u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(
ra=150 * u.deg,
dec=-11 * u.deg,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
)
assert "pm_ra_cosdec" in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(
l=150 * u.deg,
b=-11 * u.deg,
pm_l=100 * u.mas / u.yr,
pm_b=10 * u.mas / u.yr,
frame="galactic",
)
assert "pm_l_cosb" in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(
x=1 * u.pc,
y=2 * u.pc,
z=3 * u.pc,
pm_ra=100 * u.mas / u.yr,
pm_dec=10 * u.mas / u.yr,
representation_type="cartesian",
)
assert "pm_ra_cosdec" not in str(e.value)
def test_contained_by():
"""
Test SkyCoord.contained_by(wcs, image)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
test_wcs = WCS(fits.Header.fromstring(header.strip(), "\n"))
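# The reference point is near RA=250 deg, Dec=2 deg and the footprint spans
# roughly 12 deg on a side, so RA=254 deg lands inside it while RA=240 deg does not.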
assert SkyCoord(254, 2, unit="deg").contained_by(test_wcs)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs)
img = np.zeros((2136, 2078))
assert SkyCoord(250, 2, unit="deg").contained_by(test_wcs, img)
assert not SkyCoord(240, 2, unit="deg").contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit="deg")
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [
RepresentationMapping(
reprname="lon", framename="lon", defaultunit=u.deg
),
RepresentationMapping(
reprname="lat", framename="lat", defaultunit=u.deg
),
RepresentationMapping(
reprname="distance", framename="radius", defaultunit=None
),
]
}
fr = MockHeliographicStonyhurst(lon=1 * u.deg, lat=2 * u.deg, radius=10 * u.au)
SkyCoord(0 * u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ["alias_1", "alias_2"]
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(
tfun,
MultipleAliasesFrame,
MultipleAliasesFrame,
register_graph=frame_transform_graph,
)
coord = SkyCoord(lon=1 * u.deg, lat=2 * u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert "alias_1" in coord.__dir__()
assert "alias_2" in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to("alias_1").frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to("alias_2").frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize(
"kwargs, error_message",
[
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{
"rho": 1 * u.m,
"phi": 1,
"z": 1 * u.m,
"unit": "deg",
"representation_type": "cylindrical",
},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
],
)
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(
kwargs, error_message
):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord(
[1, 2, 3, 4] * u.degree,
[0, 0, 0, 0] * u.degree,
distance=[1, 1, 1.5, 1] * u.kpc,
frame="fk5",
)
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox="J1950"))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(quantity, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=1e-14 * u.deg, rtol=0)
assert_allclose(distance, 0 * u.kpc, atol=1e-14 * u.kpc, rtol=0)
def test_subclass_property_exception_error():
"""Regression test for gh-8340.
Non-existing attribute access inside a property should give attribute
error for the attribute, not for the property.
"""
class custom_coord(SkyCoord):
@property
def prop(self):
return self.random_attr
c = custom_coord("00h42m30s", "+41d12m00s", frame="icrs")
with pytest.raises(AttributeError, match="random_attr"):
# Before this matched "prop" rather than "random_attr"
c.prop
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import numpy as np
import pytest
from astropy import constants
from astropy import units as u
from astropy.coordinates.angles import Latitude, Longitude
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.name_resolve import NameResolveError
from astropy.coordinates.representation.geodetic import ELLIPSOIDS
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.units.tests.test_quantity_erfa_ufuncs import vvd
def allclose_m14(a, b, rtol=1.0e-14, atol=None):
if atol is None:
atol = 1.0e-14 * getattr(a, "unit", 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.0e-8, atol=None):
if atol is None:
atol = 1.0e-8 * getattr(a, "unit", 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic("WGS84")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic("GRS80")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic("WGS72")
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="WGS84")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="GRS80")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid="WGS72")
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
class TestInput:
def setup_method(self):
self.lon = Longitude(
[0.0, 45.0, 90.0, 135.0, 180.0, -180, -90, -45],
u.deg,
wrap_angle=180 * u.deg,
)
self.lat = Latitude([+0.0, 30.0, 60.0, +90.0, -90.0, -60.0, -30.0, 0.0], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11.0, -0.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic, self.location.to_geodetic())
)
assert all(
np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric, self.location.to_geocentric())
)
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(
self.x.value, self.y.value, self.z.value, self.x.unit
)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(
self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m),
)
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value, self.location.lon.value)
assert allclose_m14(geodetic3.lat.value, self.location.lat.value)
assert not np.any(
isclose_m14(geodetic3.height.value, self.location.height.value)
)
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value, self.location.lon.value)
assert allclose_m14(geodetic4.lat.value, self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value, self.location.height[-1].value)
assert not np.any(
isclose_m14(geodetic4.height[:-1].value, self.location.height[:-1].value)
)
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# input that cannot be interpreted as either geocentric or geodetic raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value, self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid="WGS72"
)
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == "WGS72"
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72["x"]
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid="foo")
with pytest.raises(ValueError):
self.location.ellipsoid = "foo"
with pytest.raises(ValueError):
self.location.to_geodetic("foo")
@pytest.mark.parametrize("ellipsoid", ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
# Heights are very similar for some ellipsoids, and some lon, lat values are identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(
self.lon, self.lat, self.h, ellipsoid=ellipsoid
)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in "x", "y", "z":
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.0
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.0 * u.m, 6000 * u.km, 6000 * u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon="149:3:57.9", lat="-31:16:37.3")
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1.0, 2.0] * u.deg, lat=[-1.0, 9.0] * u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
# TODO: this parametrize should include a second option with a valid Google API
# key. For example, we should make an API key for Astropy, and add it to GitHub Actions
# as an environment variable (for security).
@pytest.mark.parametrize("google_api_key", [None])
def test_of_address(google_api_key):
NYC_lon = -74.0 * u.deg
NYC_lat = 40.7 * u.deg
# ~10 km tolerance to address difference between OpenStreetMap and Google
# for "New York, NY". This doesn't matter in practice because this test is
# only used to verify that the query succeeded, not that the returned
# position is precise.
NYC_tol = 0.1 * u.deg
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# API limit might surface even here in CI.
if "unknown failure with" not in str(e):
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert np.allclose(loc.height.value, 0.0)
# Put this one here as buffer to get around Google map API limit per sec.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
if google_api_key is not None:
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert quantity_allclose(loc.height, 10.438 * u.meter, atol=1.0 * u.cm)
def test_geodetic_tuple():
lat = 2 * u.deg
lon = 10 * u.deg
height = 100 * u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7 * u.deg, lat=37 * u.deg)
sometime = Time("2017-8-21 18:26:40")
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.0 * u.mm / u.s < abs(zg_week - zg0) < 1 * u.cm / u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1 * u.cm / u.s < abs(zg_halfyear - zg0) < 1 * u.dm / u.s
# but when back to the same time in a year, should be tenths of mm
# even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert 0.1 * u.mm / u.s < abs(zg_year - zg0) < 1 * u.mm / u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": 0 * constants.G * u.kg,
"moon": 0 * constants.G * u.kg,
}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert 0.1 * u.mm / u.s < abs(zg_moonjup - zg0) < 1 * u.mm / u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime, bodies=("sun",))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=("earth", "sun")
)
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses["earth"] = 0 * u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1 * u.dm / u.s < abs(zg_moonjupearth - zg0) < 1 * u.m / u.s
# If all masses are zero, redshift should be 0 as well.
masses["sun"] = 0 * u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=("saturn",))
with pytest.raises(u.UnitsError):
masses = {
"sun": constants.G * constants.M_sun,
"jupiter": constants.G * constants.M_jup,
"moon": 1 * u.km, # wrong units!
"earth": constants.G * constants.M_earth,
}
someloc.gravitational_redshift(sometime, masses=masses)
def test_read_only_input():
lon = np.array([80.0, 440.0]) * u.deg
lat = np.array([45.0]) * u.deg
lon.flags.writeable = lat.flags.writeable = False
loc = EarthLocation.from_geodetic(lon=lon, lat=lat)
assert quantity_allclose(loc[1].x, loc[0].x)
def test_info():
EarthLocation._get_site_registry(force_builtin=True)
greenwich = EarthLocation.of_site("greenwich")
assert str(greenwich.info).startswith("name = Royal Observatory Greenwich")
|
98e9a890c64e08a19084e7dc70b8f64509cdb7e76ccc3c7f07d2198c7afa02e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cartesian representations and differentials."""
import numpy as np
from erfa import ufunc as erfa_ufunc
import astropy.units as u
from .base import BaseDifferential, BaseRepresentation
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantities,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
If given, the coordinates will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `~astropy.coordinates.CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CartesianDifferential` instance, or a dictionary of
`~astropy.coordinates.CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"x": u.Quantity, "y": u.Quantity, "z": u.Quantity}
_xyz = None
def __init__(
self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True
):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (
isinstance(x, CartesianRepresentation)
and unit is None
and xyz_axis is None
):
if differentials is None:
differentials = x._differentials
return super().__init__(x, differentials=differentials, copy=copy)
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if x, y, and z are in a single array"
" passed in through x, i.e., y and z should not be not given."
)
if y is None or z is None:
raise ValueError(
f"x, y, and z are required to instantiate {self.__class__.__name__}"
)
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (
self._x.unit.is_equivalent(self._y.unit)
and self._x.unit.is_equivalent(self._z.unit)
):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
o = np.broadcast_to(0.0 * u.one, self.shape, subok=True)
return {
"x": CartesianRepresentation(l, o, o, copy=False),
"y": CartesianRepresentation(o, l, o, copy=False),
"z": CartesianRepresentation(o, o, l, copy=False),
}
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"x": l, "y": l, "z": l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
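Examples
--------
As an illustrative sketch (the values below are arbitrary), the components
can be stacked along either the leading or the trailing axis:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc, [3, 4] * u.pc, [5, 6] * u.pc)
>>> rep.get_xyz().shape
(3, 2)
>>> rep.get_xyz(xyz_axis=-1).shape
(2, 3)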
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = (self, other_c) if not reverse else (other_c, self)
return self.__class__(
*(
op(getattr(first, component), getattr(second, component))
for component in first.components
)
)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
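Examples
--------
A minimal, illustrative example (values chosen arbitrarily):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation(3 * u.km, 4 * u.km, 0 * u.km)
>>> print(rep.norm().to(u.km))
5.0 km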
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._apply("mean", *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._apply("sum", *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
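Examples
--------
A minimal, illustrative example (values chosen arbitrarily):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep1 = CartesianRepresentation(1 * u.m, 2 * u.m, 3 * u.m)
>>> rep2 = CartesianRepresentation(4 * u.m, 5 * u.m, 6 * u.m)
>>> print(rep1.dot(rep2).to(u.m**2))
32.0 m2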
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"can only take dot product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
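Examples
--------
A minimal, illustrative example using unit vectors along x and y:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep1 = CartesianRepresentation(1 * u.m, 0 * u.m, 0 * u.m)
>>> rep2 = CartesianRepresentation(0 * u.m, 1 * u.m, 0 * u.m)
>>> print(rep1.cross(rep2).z.to(u.m**2))
1.0 m2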
"""
self._raise_if_has_differentials("cross")
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"cannot only take cross product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
If given, the differentials will be converted to this unit (or taken to
be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None, copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if d_x, d_y, and d_z are in a single array"
" passed in through d_x, i.e., d_y and d_z should not be not given."
)
if d_y is None or d_z is None:
raise ValueError(
"d_x, d_y, and d_z are required to instantiate"
f" {self.__class__.__name__}"
)
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (
self._d_x.unit.is_equivalent(self._d_y.unit)
and self._d_x.unit.is_equivalent(self._d_z.unit)
):
raise u.UnitsError("d_x, d_y and d_z should have equivalent units.")
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def transform(self, matrix, base=None, transformed_base=None):
"""Transform differentials using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base, transformed_base : `~astropy.coordinates.CartesianRepresentation` or None, optional
Not used in the Cartesian transformation.
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_d_xyz(xyz_axis=-1))
return self.__class__(p, xyz_axis=-1, copy=False)
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._d_x, self._d_y, self._d_z], axis=xyz_axis)
d_xyz = property(get_d_xyz)
|
20fc29eb0a24f80f550f2f2a2bbfd049ec3742fa0cd84a79008831ffce97d103 | """
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
from .base import BaseDifferential, BaseRepresentation, BaseRepresentationOrDifferential
from .cartesian import CartesianDifferential, CartesianRepresentation
from .cylindrical import CylindricalDifferential, CylindricalRepresentation
from .geodetic import (
BaseBodycentricRepresentation,
BaseGeodeticRepresentation,
GRS80GeodeticRepresentation,
WGS72GeodeticRepresentation,
WGS84GeodeticRepresentation,
)
from .spherical import (
BaseSphericalCosLatDifferential,
BaseSphericalDifferential,
PhysicsSphericalDifferential,
PhysicsSphericalRepresentation,
RadialDifferential,
RadialRepresentation,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
# The following imports are included for backwards compatibility.
# isort: split
from .base import (
DIFFERENTIAL_CLASSES,
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
get_reprdiff_cls_hash,
)
__all__ = [
"BaseRepresentationOrDifferential",
"BaseRepresentation",
"CartesianRepresentation",
"SphericalRepresentation",
"UnitSphericalRepresentation",
"RadialRepresentation",
"PhysicsSphericalRepresentation",
"CylindricalRepresentation",
"BaseDifferential",
"CartesianDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
"SphericalDifferential",
"SphericalCosLatDifferential",
"UnitSphericalDifferential",
"UnitSphericalCosLatDifferential",
"RadialDifferential",
"CylindricalDifferential",
"PhysicsSphericalDifferential",
"BaseGeodeticRepresentation",
"BaseBodycentricRepresentation",
"WGS84GeodeticRepresentation",
"WGS72GeodeticRepresentation",
"GRS80GeodeticRepresentation",
]
|
025218a6a852d471b6a5607e6d822ad514c6965e1043d8f868da4a1b730d82ca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.angles import Latitude, Longitude
from astropy.utils.decorators import format_doc
from .base import BaseRepresentation
from .cartesian import CartesianRepresentation
ELLIPSOIDS = {}
"""Available ellipsoids (defined in erfam.h, with numbers exposed in erfa)."""
# Note: they get filled by the creation of the geodetic classes.
geodetic_base_doc = """{__doc__}
Parameters
----------
lon, lat : angle-like
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle` and either
`~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
depending on the parameter.
height : `~astropy.units.Quantity` ['length']
The height of the point(s).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
"""
Base class for geodetic representations.
Subclasses need to set attributes ``_equatorial_radius`` and ``_flattening``
to quantities holding correct values (with units of length and dimensionless,
respectively), or alternatively an ``_ellipsoid`` attribute to the relevant ERFA
index (as passed in to `erfa.eform`). The geodetic latitude is defined by the
angle between the vertical to the surface at a specific point of the spheroid and
its projection onto the equatorial plane.
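As an illustrative sketch, a custom spheroid can be defined directly from an
equatorial radius and flattening (the class name and numerical values below
are invented for the example):
>>> from astropy import units as u
>>> from astropy.coordinates import BaseGeodeticRepresentation
>>> class CustomGeodeticRepresentation(BaseGeodeticRepresentation):
...     _equatorial_radius = 6378136.0 * u.m
...     _flattening = 0.0033528 * u.dimensionless_unscaled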
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
if "_ellipsoid" in cls.__dict__:
equatorial_radius, flattening = erfa.eform(getattr(erfa, cls._ellipsoid))
cls._equatorial_radius = equatorial_radius * u.m
cls._flattening = flattening * u.dimensionless_unscaled
ELLIPSOIDS[cls._ellipsoid] = cls
elif (
"_equatorial_radius" not in cls.__dict__
or "_flattening" not in cls.__dict__
):
raise AttributeError(
f"{cls.__name__} requires '_ellipsoid' or '_equatorial_radius' and '_flattening'."
)
super().__init_subclass__(**kwargs)
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts geodetic coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
xyz = erfa.gd2gce(
self._equatorial_radius,
self._flattening,
self.lon,
self.lat,
self.height,
)
return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
geodetic coordinates.
"""
# Compute geodetic/planetodetic angles
lon, lat, height = erfa.gc2gde(
cls._equatorial_radius, cls._flattening, cart.get_xyz(xyz_axis=-1)
)
return cls(lon, lat, height, copy=False)
@format_doc(geodetic_base_doc)
class BaseBodycentricRepresentation(BaseRepresentation):
"""Representation of points in bodycentric 3D coordinates.
Subclasses need to set attributes ``_equatorial_radius`` and ``_flattening``
to quantities holding correct values (with units of length and dimensionless,
respectively). The bodycentric latitude and longitude are the spherical latitude
and longitude relative to the barycenter of the body.
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
if (
"_equatorial_radius" not in cls.__dict__
or "_flattening" not in cls.__dict__
):
raise AttributeError(
f"{cls.__name__} requires '_equatorial_radius' and '_flattening'."
)
super().__init_subclass__(**kwargs)
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts bodycentric coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
coslat = np.cos(self.lat)
sinlat = np.sin(self.lat)
coslon = np.cos(self.lon)
sinlon = np.sin(self.lon)
r = (
self._equatorial_radius * np.hypot(coslat, (1 - self._flattening) * sinlat)
+ self.height
)
x = r * coslon * coslat
y = r * sinlon * coslat
z = r * sinlat
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
bodycentric coordinates.
"""
# Compute bodycentric latitude
p = np.hypot(cart.x, cart.y)
d = np.hypot(p, cart.z)
lat = np.arctan2(cart.z, p)
p_spheroid = cls._equatorial_radius * np.cos(lat)
z_spheroid = cls._equatorial_radius * (1 - cls._flattening) * np.sin(lat)
r_spheroid = np.hypot(p_spheroid, z_spheroid)
height = d - r_spheroid
lon = np.arctan2(cart.y, cart.x)
return cls(lon, lat, height, copy=False)
@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS84 3D geodetic coordinates."""
_ellipsoid = "WGS84"
@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS72 3D geodetic coordinates."""
_ellipsoid = "WGS72"
@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
|
79ace632615f7f72637943eaba19b80a4d2bfef44e11f4b8cc15cb1090916a40 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Base classes for representations and differentials."""
import abc
import functools
import operator
import warnings
import numpy as np
import astropy.units as u
from astropy.coordinates.angles import Angle
from astropy.utils import ShapedLikeNDArray, classproperty
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import DuplicateRepresentationWarning
# Module-level dict mapping representation string alias names to classes.
# This is populated by __init_subclass__ when called by Representation or
# Differential classes so that they are all registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# set for tracking duplicates
DUPLICATE_REPRESENTATIONS = set()
# a hash for the content of the above two dicts, cached for speed.
_REPRDIFF_HASH = None
def _fqn_class(cls):
"""Get the fully qualified name of a class."""
return cls.__module__ + "." + cls.__qualname__
def get_reprdiff_cls_hash():
"""
Returns a hash value that should be invariable if the
`REPRESENTATION_CLASSES` and `DIFFERENTIAL_CLASSES` dictionaries have not
changed.
"""
global _REPRDIFF_HASH
if _REPRDIFF_HASH is None:
_REPRDIFF_HASH = hash(tuple(REPRESENTATION_CLASSES.items())) + hash(
tuple(DIFFERENTIAL_CLASSES.items())
)
return _REPRDIFF_HASH
def _invalidate_reprdiff_cls_hash():
global _REPRDIFF_HASH
_REPRDIFF_HASH = None
class BaseRepresentationOrDifferentialInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Indicates unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
# Create numpy dtype so that numpy formatting will work.
components = val.components
values = tuple(getattr(val, component).value for component in components)
a = np.empty(
getattr(val, "shape", ()),
[(component, value.dtype) for component, value in zip(components, values)],
)
for component, value in zip(components, values):
a[component] = value
return str(a)
@property
def _represent_as_dict_attrs(self):
return self._parent.components
@property
def unit(self):
if self._parent is None:
return None
unit = self._parent._unitstr
return unit[1:-1] if unit.startswith("(") else unit
def new_like(self, reps, length, metadata_conflicts="warn", name=None):
"""
Return a new instance like ``reps`` with ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
reps : list
List of input representations or differentials.
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential` subclass instance
Empty instance of this class consistent with ``reps``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
reps, metadata_conflicts, name, ("meta", "description")
)
# Make a new representation or differential with the desired length
# using the _apply / __getitem__ machinery to effectively return
# rep0[[0, 0, ..., 0, 0]]. This will have the right shape, and
# include possible differentials.
indexes = np.zeros(length, dtype=np.int64)
out = reps[0][indexes]
# Use __setitem__ machinery to check whether all representations
# can represent themselves as this one without loss of information.
for rep in reps[1:]:
try:
out[0] = rep[0]
except Exception as err:
raise ValueError("input representations are inconsistent.") from err
# Set (merged) info attributes.
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied; if `False`, they will be
broadcast together but not use new memory.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
info = BaseRepresentationOrDifferentialInfo()
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
if (
args
and isinstance(args[0], self.__class__)
and all(arg is None for arg in args[1:])
):
rep_or_diff = args[0]
copy = kwargs.pop("copy", True)
attrs = [getattr(rep_or_diff, component) for component in components]
if "info" in rep_or_diff.__dict__:
self.info = rep_or_diff.info
if kwargs:
raise TypeError(
"unexpected keyword arguments for case "
f"where class instance is passed in: {kwargs}"
)
else:
attrs = []
for component in components:
try:
attr = args.pop(0) if args else kwargs.pop(component)
except KeyError:
raise TypeError(
"__init__() missing 1 required positional "
f"argument: {component!r}"
) from None
if attr is None:
raise TypeError(
"__init__() missing 1 required positional argument:"
f" {component!r} (or first argument should be an instance of"
f" {self.__class__.__name__})."
)
attrs.append(attr)
copy = args.pop(0) if args else kwargs.pop("copy", True)
if args:
raise TypeError(f"unexpected arguments: {args}")
if kwargs:
for component in components:
if component in kwargs:
raise TypeError(
f"__init__() got multiple values for argument {component!r}"
)
raise TypeError(f"unexpected keyword arguments: {kwargs}")
# Pass attributes through the required initializing classes.
attrs = [
self.attr_classes[component](attr, copy=copy, subok=True)
for component, attr in zip(components, attrs)
]
try:
bc_attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError as err:
if len(components) <= 2:
c_str = " and ".join(components)
else:
c_str = ", ".join(components[:2]) + ", and " + components[2]
raise ValueError(f"Input parameters {c_str} cannot be broadcast") from err
# The output of np.broadcast_arrays() has limitations on writeability, so we perform
# additional handling to enable writeability in most situations. This is primarily
# relevant for allowing the changing of the wrap angle of longitude components.
#
# If the shape has changed for a given component, broadcasting is needed:
# If copy=True, we make a copy of the broadcasted array to ensure writeability.
# Note that array had already been copied prior to the broadcasting.
# TODO: Find a way to avoid the double copy.
# If copy=False, we use the broadcasted array, and writeability may still be
# limited.
# If the shape has not changed for a given component, we can proceed with using the
# non-broadcasted array, which avoids writeability issues from np.broadcast_arrays().
attrs = [
(bc_attr.copy() if copy else bc_attr)
if bc_attr.shape != attr.shape
else attr
for attr, bc_attr in zip(attrs, bc_attrs)
]
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, "_" + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
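For example:
>>> from astropy.coordinates import CartesianRepresentation
>>> CartesianRepresentation.get_name()
'cartesian'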
"""
name = cls.__name__.lower()
if name.endswith("representation"):
name = name[:-14]
elif name.endswith("differential"):
name = name[:-12]
return name
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `~astropy.coordinates.CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` subclass instance
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. For example, transforming
an angular position defined at distance=0 through cartesian coordinates
and back will lose the original angular coordinates::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> rep = coord.SphericalRepresentation(
... lon=15*u.deg,
... lat=-11*u.deg,
... distance=0*u.pc)
>>> rep.to_cartesian().represent_as(coord.SphericalRepresentation)
<SphericalRepresentation (lon, lat, distance) in (rad, rad, pc)
(0., 0., 0.)>
Returns
-------
cartrepr : `~astropy.coordinates.CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def __eq__(self, value):
"""Equality operator.
This implements strict equality and requires that the representation
classes are identical and that the representation data are exactly equal.
"""
if self.__class__ is not value.__class__:
raise TypeError(
"cannot compare: objects must have same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
try:
np.broadcast(self, value)
except ValueError as exc:
raise ValueError(f"cannot compare: {exc}") from exc
out = True
for comp in self.components:
out &= getattr(self, "_" + comp) == getattr(value, "_" + comp)
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
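Examples
--------
For instance, ``reshape`` is routed through this method (values below are
arbitrary):
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2, 3, 4] * u.m,
...                               [5, 6, 7, 8] * u.m,
...                               [9, 10, 11, 12] * u.m)
>>> rep.reshape(2, 2).shape
(2, 2)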
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, "_" + component, apply_method(getattr(self, component)))
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
if value.__class__ is not self.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
for component in self.components:
getattr(self, "_" + component)[item] = getattr(value, "_" + component)
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return {cmpnt: getattr(self, cmpnt).unit for cmpnt in self.components}
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = "({})".format(
", ".join(
self._units[component].to_string() for component in self.components
)
)
return unitstr
def __str__(self):
return f"{np.array2string(self._values, separator=', ')} {self._unitstr:s}"
def __repr__(self):
prefixstr = " "
arrstr = np.array2string(self._values, prefix=prefixstr, separator=", ")
diffstr = ""
if getattr(self, "differentials", None):
diffstr = "\n (has differentials w.r.t.: {})".format(
", ".join([repr(key) for key in self.differentials.keys()])
)
unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
return (
f"<{self.__class__.__name__} ({', '.join(self.components)})"
f" {unitstr:s}\n{prefixstr}{arrstr}{diffstr}>"
)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = "_" + component
def get_component(self):
return getattr(self, component)
return get_component
class RepresentationInfo(BaseRepresentationOrDifferentialInfo):
@property
def _represent_as_dict_attrs(self):
attrs = super()._represent_as_dict_attrs
if self._parent._differentials:
attrs += ("differentials",)
return attrs
def _represent_as_dict(self, attrs=None):
out = super()._represent_as_dict(attrs)
for key, value in out.pop("differentials", {}).items():
out[f"differentials.{key}"] = value
return out
def _construct_from_dict(self, map):
differentials = {}
for key in list(map.keys()):
if key.startswith("differentials."):
differentials[key[14:]] = map.pop(key)
map["differentials"] = differentials
return super()._construct_from_dict(map)
class BaseRepresentation(BaseRepresentationOrDifferential):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, a `dict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
info = RepresentationInfo()
def __init_subclass__(cls, **kwargs):
# Register representation name (except for BaseRepresentation)
if cls.__name__ == "BaseRepresentation":
return
if not hasattr(cls, "attr_classes"):
raise NotImplementedError(
'Representations must have an "attr_classes" class attribute.'
)
repr_name = cls.get_name()
# first time a duplicate is added
# remove first entry and add both using their qualnames
if repr_name in REPRESENTATION_CLASSES:
DUPLICATE_REPRESENTATIONS.add(repr_name)
fqn_cls = _fqn_class(cls)
existing = REPRESENTATION_CLASSES[repr_name]
fqn_existing = _fqn_class(existing)
if fqn_cls == fqn_existing:
raise ValueError(f'Representation "{fqn_cls}" already defined')
msg = (
f'Representation "{repr_name}" already defined, removing it to avoid'
f' confusion. Use qualnames "{fqn_cls}" and "{fqn_existing}" or class'
" instances directly"
)
warnings.warn(msg, DuplicateRepresentationWarning)
del REPRESENTATION_CLASSES[repr_name]
REPRESENTATION_CLASSES[fqn_existing] = existing
repr_name = fqn_cls
# further definitions with the same name, just add qualname
elif repr_name in DUPLICATE_REPRESENTATIONS:
fqn_cls = _fqn_class(cls)
warnings.warn(
f'Representation "{repr_name}" already defined, using qualname '
f'"{fqn_cls}".'
)
repr_name = fqn_cls
if repr_name in REPRESENTATION_CLASSES:
raise ValueError(f'Representation "{repr_name}" already defined')
REPRESENTATION_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"The '{component}' component of the points(s).",
),
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
if differentials is None and args and isinstance(args[0], self.__class__):
differentials = args[0]._differentials
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
from .spherical import RadialDifferential, UnitSphericalRepresentation
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if isinstance(differentials, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
raise ValueError(
"To attach a RadialDifferential to a UnitSphericalRepresentation,"
" you must supply a dictionary with an appropriate key."
)
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError as err:
raise TypeError(
"'differentials' argument must be a dictionary-like object"
) from err
diff._check_base(self)
if isinstance(diff, RadialDifferential) and isinstance(
self, UnitSphericalRepresentation
):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError(
f"For differential object '{repr(diff)}', expected "
f"unit key = '{expected_key}' but received key = '{key}'"
)
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
# so use a ValueError instead?
raise ValueError(
"Shape of differentials must be the same "
f"as the shape of the representation ({diff.shape} vs {self.shape})"
)
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError(
f"Operation '{op_name}' is not supported when "
f"differentials are attached to a {self.__class__.__name__}."
)
@classproperty
def _compatible_differentials(cls):
return [DIFFERENTIAL_CLASSES[cls.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented unit vectors")
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError(f"{type(self)} has not implemented scale factors.")
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this representation!")
elif (
len(self.differentials) == 1
and isinstance(differential_class, type)
and issubclass(differential_class, BaseDifferential)
):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
elif differential_class.keys() != self.differentials.keys():
raise ValueError(
"Desired differential classes must be passed in as a dictionary with"
" keys equal to a string representation of the unit of the derivative"
" for each differential stored with this "
f"representation object ({self.differentials})"
)
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k], base=self)
except Exception as err:
if differential_class[k] not in new_rep._compatible_differentials:
raise TypeError(
f"Desired differential class {differential_class[k]} is not "
"compatible with the desired "
f"representation class {new_rep.__class__}"
) from err
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
:meth:`~astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian`
for an example.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
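Examples
--------
An illustrative conversion, via Cartesian coordinates, of a point on the
x axis (values are arbitrary):
>>> from astropy import units as u
>>> from astropy.coordinates import (CartesianRepresentation,
...                                  SphericalRepresentation)
>>> rep = CartesianRepresentation(1 * u.pc, 0 * u.pc, 0 * u.pc)
>>> sph = rep.represent_as(SphericalRepresentation)
>>> print(sph.distance.to(u.pc))
1.0 pc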
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError(
"Input to a representation's represent_as must be a class, not "
"a string. For strings, use frame objects."
)
if other_class is not self.__class__:
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
else:
new_rep = self
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class
)
return new_rep
def transform(self, matrix):
"""Transform coordinates using a 3x3 matrix in a Cartesian basis.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
"""
from .cartesian import CartesianDifferential, CartesianRepresentation
# route transformation through Cartesian
difs_cls = {k: CartesianDifferential for k in self.differentials.keys()}
crep = self.represent_as(
CartesianRepresentation, differential_class=difs_cls
).transform(matrix)
# move back to original representation
difs_cls = {k: diff.__class__ for k, diff in self.differentials.items()}
rep = crep.represent_as(self.__class__, difs_cls)
return rep
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : sequence of `~astropy.coordinates.BaseDifferential` subclass instance
The differentials for the new representation to have.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A copy of this representation, but with the ``differentials`` as
its differentials.
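Examples
--------
A sketch of attaching a velocity differential to a position (values are
arbitrary); the differential is stored under the key ``'s'``:
>>> from astropy import units as u
>>> from astropy.coordinates import (CartesianDifferential,
...                                  CartesianRepresentation)
>>> rep = CartesianRepresentation(1 * u.pc, 2 * u.pc, 3 * u.pc)
>>> dif = CartesianDifferential(4 * u.km / u.s, 5 * u.km / u.s, 6 * u.km / u.s)
>>> list(rep.with_differentials(dif).differentials)
['s']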
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(
*args, differentials=self.differentials.copy(), copy=False
)
new_rep._differentials.update(new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
`~astropy.coordinates.BaseRepresentation` subclass instance
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
The presentation that should be converted to this class.
"""
return representation.represent_as(cls)
def __eq__(self, value):
"""Equality operator for BaseRepresentation.
This implements strict equality and requires that the representation
classes are identical, the differentials are identical, and that the
representation data are exactly equal.
"""
        # BaseRepresentationOrDifferential (checks classes and compares components)
out = super().__eq__(value)
# super() checks that the class is identical so can this even happen?
# (same class, different differentials ?)
if self._differentials.keys() != value._differentials.keys():
raise ValueError("cannot compare: objects must have same differentials")
for self_diff, value_diff in zip(
self._differentials.values(), value._differentials.values()
):
out &= self_diff == value_diff
return out
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args : tuple
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = {
k: diff._apply(method, *args, **kwargs)
for k, diff in self._differentials.items()
}
return rep
def __setitem__(self, item, value):
if not isinstance(value, BaseRepresentation):
raise TypeError(
f"value must be a representation instance, not {type(value)}."
)
if not (
isinstance(value, self.__class__)
or len(value.attr_classes) == len(self.attr_classes)
):
raise ValueError(
f"value must be representable as {self.__class__.__name__} "
"without loss of information."
)
diff_classes = {}
if self._differentials:
if self._differentials.keys() != value._differentials.keys():
raise ValueError("value must have the same differentials.")
for key, self_diff in self._differentials.items():
diff_classes[key] = self_diff_cls = self_diff.__class__
value_diff_cls = value._differentials[key].__class__
if not (
                    issubclass(value_diff_cls, self_diff_cls)
or (
len(value_diff_cls.attr_classes)
== len(self_diff_cls.attr_classes)
)
):
raise ValueError(
f"value differential {key!r} must be representable as "
f"{self_diff.__class__.__name__} without loss of information."
)
value = value.represent_as(self.__class__, diff_classes)
super().__setitem__(item, value)
for key, differential in self._differentials.items():
differential[item] = value._differentials[key]
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
result = self.__class__(*results)
except Exception:
return NotImplemented
for key, differential in self.differentials.items():
diff_result = differential._scale_operation(op, *args, scaled_base=True)
result.differentials[key] = diff_result
return result
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.sqrt(
sum(
getattr(self, component) ** 2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle)
)
)
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials("mean")
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : `~astropy.coordinates.BaseRepresentation` subclass instance
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials("sum")
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.BaseRepresentation` subclass instance
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials("cross")
return self.from_cartesian(self.to_cartesian().cross(other))
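    # A minimal usage sketch (illustrative only, not part of the original
    # source): dot and cross products are evaluated via the Cartesian
    # representation.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import CartesianRepresentation
    #
    #   a = CartesianRepresentation(1, 0, 0, unit=u.m)
    #   b = CartesianRepresentation(0, 1, 0, unit=u.m)
    #   a.dot(b)    # Quantity, 0 m2
    #   a.cross(b)  # CartesianRepresentation along +z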
class BaseDifferential(BaseRepresentationOrDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
Notes
-----
All differential representation classes should subclass this base class,
    and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
"""
def __init_subclass__(cls, **kwargs):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
# Don't do anything for base helper classes.
if cls.__name__ in (
"BaseDifferential",
"BaseSphericalDifferential",
"BaseSphericalCosLatDifferential",
):
return
if not hasattr(cls, "base_representation"):
raise NotImplementedError(
"Differential representations must have a"
'"base_representation" class attribute.'
)
# If not defined explicitly, create attr_classes.
if not hasattr(cls, "attr_classes"):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = {"d_" + c: u.Quantity for c in base_attr_classes}
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError(f"Differential class {repr_name} already defined")
DIFFERENTIAL_CLASSES[repr_name] = cls
_invalidate_reprdiff_cls_hash()
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(
cls,
component,
property(
_make_getter(component),
doc=f"Component '{component}' of the Differential.",
),
)
super().__init_subclass__(**kwargs)
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError(
f"Differential class {cls} is not compatible with the "
f"base (representation) class {base.__class__}"
)
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, f"d_{name}", None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# This is quite a bit faster than using to_system() or going
# through Quantity()
d_unit_si = d_unit.decompose(u.si.bases)
d_unit_si._scale = 1 # remove the scale from the unit
return str(d_unit_si)
else:
raise RuntimeError(
"Invalid representation-differential units! This likely happened "
"because either the representation or the associated differential "
"have non-standard units. Check that the input positional data have "
"positional units, and the input velocity data have velocity units, "
"or are both dimensionless."
)
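    # A minimal usage sketch (illustrative only, not part of the original
    # source): the key under which a differential is stored is the SI unit of
    # the quantity with respect to which the derivative is taken, e.g. 's'
    # for a velocity attached to a positional representation.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import (
    #       CartesianDifferential, CartesianRepresentation)
    #
    #   rep = CartesianRepresentation(
    #       1, 2, 3, unit=u.kpc,
    #       differentials=CartesianDifferential(1, 2, 3, unit=u.km / u.s))
    #   list(rep.differentials)  # ['s']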
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
`~astropy.coordinates.CartesianDifferential`
This object, converted.
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add,
(
getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)
),
)
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other
The object to convert into this differential.
base : `~astropy.coordinates.BaseRepresentation`
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Will be converted to ``cls.base_representation`` if needed.
Returns
-------
`~astropy.coordinates.BaseDifferential` subclass instance
A new differential object that is this class' type.
"""
base = base.represent_as(cls.base_representation)
base_e, base_sf = cls._get_base_vectors(base)
return cls(
*(other.dot(e / base_sf[component]) for component, e in base_e.items()),
copy=False,
)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
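    # A minimal usage sketch (illustrative only, not part of the original
    # source): converting a spherical differential to Cartesian requires the
    # base representation at which it is defined.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import (
    #       CartesianDifferential, SphericalDifferential, SphericalRepresentation)
    #
    #   base = SphericalRepresentation(10 * u.deg, 20 * u.deg, 1 * u.kpc)
    #   diff = SphericalDifferential(1 * u.mas / u.yr, 2 * u.mas / u.yr,
    #                                5 * u.km / u.s)
    #   cart_diff = diff.represent_as(CartesianDifferential, base=base)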
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation)
)
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
from .cartesian import CartesianDifferential
# route transformation through Cartesian
cdiff = self.represent_as(CartesianDifferential, base=base).transform(matrix)
# move back to original representation
diff = cdiff.represent_as(self.__class__, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
"""Scale all components.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
scaled_base : bool, optional
Whether the base was scaled the same way. This affects whether
differential components should be scaled. For instance, a differential
in longitude should not be scaled if its spherical base is scaled
in radius.
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(
*[op(getattr(first, c), getattr(second, c)) for c in self.components]
)
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but Cartesian differentials or radial differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
from .cartesian import CartesianDifferential
# RadialDifferential overrides this function, so there is no handling here
if not isinstance(self, CartesianDifferential) and base is None:
raise ValueError(
"`base` must be provided to calculate the norm of a"
f" {type(self).__name__}"
)
return self.to_cartesian(base).norm()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Spherical representations and differentials."""
import operator
import numpy as np
from erfa import ufunc as erfa_ufunc
import astropy.units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import is_O3
from astropy.utils import classproperty
from .base import BaseDifferential, BaseRepresentation
from .cartesian import CartesianRepresentation
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"lon": Longitude, "lat": Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {"lon": sf_lon, "lat": sf_lat}
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
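    # A minimal usage sketch (illustrative only, not part of the original
    # source): points on the unit sphere round-trip through Cartesian
    # coordinates.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import UnitSphericalRepresentation
    #
    #   usph = UnitSphericalRepresentation(30 * u.deg, 45 * u.deg)
    #   cart = usph.to_cartesian()  # unit-length CartesianRepresentation
    #   back = UnitSphericalRepresentation.from_cartesian(cart)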
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if isinstance(other_class, type) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(
phi=self.lon, theta=90 * u.deg - self.lat, r=1.0, copy=False
)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0, copy=False)
return super().represent_as(other_class, differential_class)
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`~astropy.coordinates.UnitSphericalRepresentation` or `~astropy.coordinates.SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \dot M^T = I` -- like a rotation,
then the result is a `~astropy.coordinates.UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials
).transform(matrix)
return rep
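    # A minimal usage sketch (illustrative only, not part of the original
    # source): an orthogonal matrix keeps the result on the unit sphere,
    # while a scaling matrix promotes it to the dimensional representation.
    #
    #   import numpy as np
    #   import astropy.units as u
    #   from astropy.coordinates import UnitSphericalRepresentation
    #
    #   usph = UnitSphericalRepresentation(10 * u.deg, 20 * u.deg)
    #   usph.transform(np.eye(3))      # stays UnitSphericalRepresentation
    #   usph.transform(2 * np.eye(3))  # becomes SphericalRepresentation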
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.0, differentials=self.differentials
)._scale_operation(op, *args)
def __neg__(self):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super().__neg__()
result = self.__class__(self.lon + 180.0 * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, operator.neg), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs)
)
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs)
)
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other)
)
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
It can do little else but being used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity` ['length']
The distance of the point(s) from the origin.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"distance": u.Quantity}
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, differentials=differentials, copy=copy)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError(
f"Cartesian unit vectors are undefined for {self.__class__} instances"
)
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"distance": l}
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError(
f"cannot convert {self.__class__} instance to cartesian."
)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def __mul__(self, other):
if isinstance(other, BaseRepresentation):
return self.distance * other
else:
return super().__mul__(other)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
def transform(self, matrix):
"""Radial representations cannot be transformed by a Cartesian matrix.
Parameters
----------
matrix : array-like
The transformation matrix in a Cartesian basis.
Must be a multiplication: a diagonal matrix with identical elements.
            Must have shape (..., 3, 3), where the last two axes hold the
            matrices and any leading axes must be broadcastable against the
            shape of this representation.
Raises
------
ValueError
If the matrix is not a multiplication.
"""
scl = matrix[..., 0, 0]
# check that the matrix is a scaled identity matrix on the last 2 axes.
if np.any(matrix != scl[..., np.newaxis, np.newaxis] * np.identity(3)):
raise ValueError(
"Radial representations can only be "
"transformed by a scaled identity matrix"
)
return self * scl
def _spherical_op_funcs(op, *args):
"""For given operator, return functions that adjust lon, lat, distance."""
if op is operator.neg:
return lambda x: x + 180 * u.deg, operator.neg, operator.pos
try:
scale_sign = np.sign(args[0])
except Exception:
# This should always work, even if perhaps we get a negative distance.
return operator.pos, operator.pos, lambda x: op(x, *args)
scale = abs(args[0])
return (
lambda x: x + 180 * u.deg * np.signbit(scale_sign),
lambda x: x * scale_sign,
lambda x: op(x, scale),
)
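# A minimal usage sketch (illustrative only, not part of the original source):
# negating a spherical point rotates the longitude by 180 degrees and negates
# the latitude, while multiplication by a positive scalar only rescales the
# distance.
#
#   import astropy.units as u
#   from astropy.coordinates import SphericalRepresentation
#
#   sph = SphericalRepresentation(10 * u.deg, 20 * u.deg, 1 * u.kpc)
#   -sph     # lon = 190 deg, lat = -20 deg, distance = 1 kpc
#   2 * sph  # lon = 10 deg, lat = 20 deg, distance = 2 kpc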
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy, differentials=differentials)
if (
not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == "length"
):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith("distance must be >= 0"):
raise ValueError(
"Distance must be >= 0. To allow negative distance values, you"
" must explicitly pass in a `Distance` object with the the "
"argument 'allow_negative=True'."
) from e
else:
raise
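    # A minimal usage sketch (illustrative only, not part of the original
    # source): a plain length is upgraded to a Distance, which rejects
    # negative values unless an explicit Distance(..., allow_negative=True)
    # is passed in.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import Distance, SphericalRepresentation
    #
    #   SphericalRepresentation(10 * u.deg, 20 * u.deg, 1 * u.kpc)
    #   SphericalRepresentation(
    #       10 * u.deg, 20 * u.deg, Distance(-1 * u.kpc, allow_negative=True))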
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=False),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=False
),
"distance": CartesianRepresentation(
coslat * coslon, coslat * sinlon, sinlat, copy=False
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"lon": sf_lon, "lat": sf_lat, "distance": sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if isinstance(other_class, type):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=self.distance,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.lon, lat=self.lat, differentials=diffs, copy=False
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(
lon_op(self.lon), lat_op(self.lat), distance_op(self.distance), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, lat_op, distance_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`. If ``copy`` is False,
        ``phi`` will be changed in place if it is not between 0 and 360 degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `~astropy.coordinates.PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"phi": Angle, "theta": Angle, "r": u.Quantity}
def __init__(self, phi, theta=None, r=None, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
# Note that _phi already holds our own copy if copy=True.
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {theta.to(u.degree)}"
)
if self._r.unit.physical_type == "length":
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
        The inclination of the point(s), measured from the pole.
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
"r": CartesianRepresentation(
sintheta * cosphi, sintheta * sinphi, costheta, copy=False
),
}
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi": r * sintheta, "theta": r, "r": l}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if isinstance(other_class, type):
if issubclass(other_class, SphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
distance=self.r,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.phi,
lat=90 * u.deg - self.theta,
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
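    # A minimal usage sketch (illustrative only, not part of the original
    # source): the physics convention measures theta from the +z pole, so
    # theta = 90 deg - lat relative to SphericalRepresentation.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import (
    #       PhysicsSphericalRepresentation, SphericalRepresentation)
    #
    #   psph = PhysicsSphericalRepresentation(10 * u.deg, 60 * u.deg, 1 * u.kpc)
    #   sph = psph.represent_as(SphericalRepresentation)  # lat = 30 deg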
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
# apply transformation in unit-spherical coordinates
xyz = erfa_ufunc.s2c(self.phi, 90 * u.deg - self.theta)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p) # `ur` is transformed unit-`r`
# create transformed physics-spherical representation,
# reapplying the distance scaling
rep = self.__class__(phi=lon, theta=90 * u.deg - lat, r=self.r * ur)
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, adjust_theta_sign, r_op = _spherical_op_funcs(op, *args)
# Also run phi_op on theta to ensure theta remains between 0 and 180:
# any time the scale is negative, we do -theta + 180 degrees.
result = self.__class__(
phi_op(self.phi),
phi_op(adjust_theta_sign(self.theta)),
r_op(self.r),
copy=False,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, adjust_theta_sign, r_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
        The differential longitude and latitude.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat=None, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
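    # A minimal usage sketch (illustrative only, not part of the original
    # source): when the base carries a distance, the angular differentials
    # are scaled by it, turning proper motions into transverse velocities.
    #
    #   import astropy.units as u
    #   from astropy.coordinates import (
    #       SphericalRepresentation, UnitSphericalDifferential)
    #
    #   base = SphericalRepresentation(0 * u.deg, 0 * u.deg, 10 * u.pc)
    #   pm = UnitSphericalDifferential(1 * u.mas / u.yr, 0 * u.mas / u.yr)
    #   v = pm.to_cartesian(base)  # Cartesian vector, convertible to km / s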
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(
representation,
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
# some of this can be moved to the parent class.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lon.unit / base.lon.unit # derivative unit
diff = self._dimensional_differential(
d_lon=self.d_lon, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(
representation.d_phi, -representation.d_theta, representation.d_r
)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_lon, self.d_lat, op(self.d_distance, *args))
else:
return super()._scale_operation(op, *args)
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalCosLatDifferential)
and not isinstance(self, type(other))
or isinstance(other, RadialDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
        The differential longitude (with cos(lat) included) and latitude.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = UnitSphericalRepresentation
attr_classes = {"d_lon_coslat": u.Quantity, "d_lat": u.Quantity}
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat=None, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
@classmethod
def from_cartesian(cls, other, base):
# Go via the dimensional equivalent, so that the longitude and latitude
# differentials correctly take into account the norm of the base.
dimensional = cls._dimensional_differential.from_cartesian(other, base)
return dimensional.represent_as(cls)
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though w/o CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(
representation, (SphericalDifferential, UnitSphericalDifferential)
):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
def transform(self, matrix, base, transformed_base):
"""Transform differential using a 3x3 matrix in a Cartesian basis.
This returns a new differential and does not modify the original one.
Parameters
----------
matrix : (3,3) array-like
A 3x3 (or stack thereof) matrix, such as a rotation matrix.
base : instance of ``cls.base_representation``
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
transformed_base : instance of ``cls.base_representation``
Base relative to which the transformed differentials are defined.
If the other class is a differential representation, the base will
be converted to its ``base_representation``.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_O3(matrix)): # remain in unit-rep
# TODO! implement without Cartesian intermediate step.
diff = super().transform(matrix, base, transformed_base)
else: # switch to dimensional representation
du = self.d_lat.unit / base.lat.unit # derivative unit
diff = self._dimensional_differential(
d_lon_coslat=self.d_lon_coslat, d_lat=self.d_lat, d_distance=0 * du
).transform(matrix, base, transformed_base)
return diff
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.copy()
else:
return super()._scale_operation(op, *args)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {
"d_lon_coslat": u.Quantity,
"d_lat": u.Quantity,
"d_distance": u.Quantity,
}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta, representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(
self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)
)
else:
return super()._scale_operation(op, *args)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
unit_vec = base.represent_as(UnitSphericalRepresentation).to_cartesian()
return self.d_distance * unit_vec
def norm(self, base=None):
return self.d_distance
@classmethod
def from_cartesian(cls, other, base):
return cls(
other.dot(base.represent_as(UnitSphericalRepresentation)), copy=False
)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(
representation, (SphericalDifferential, SphericalCosLatDifferential)
):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(
other, (BaseSphericalDifferential, BaseSphericalCosLatDifferential)
):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError("d_phi and d_theta should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(
representation.d_lon, -representation.d_lat, representation.d_distance
)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
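# Worked note (follows directly from represent_as above): the physics convention
# measures theta from the pole while latitude is measured from the equator, so
# theta = 90 deg - lat and hence d_theta = -d_lat; this is why every conversion
# above flips the sign between d_theta and d_lat, and why d_lon_coslat uses
# sin(theta) where the latitude-based classes use cos(lat).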
|
740d03ab7859fd5a1a7d8884f2924468f8a8d681a1f2f3e3766efdb903c68e26 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cylindrical representations and differentials."""
import operator
import numpy as np
import astropy.units as u
from astropy.coordinates.angles import Angle
from .base import BaseDifferential, BaseRepresentation
from .cartesian import CartesianRepresentation
from .spherical import _spherical_op_funcs
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
to an angle between 0 and 360 degrees. This can also be instances of
`~astropy.coordinates.Angle`.
z : `~astropy.units.Quantity`
The z coordinate(s) of the point(s)
differentials : dict, `~astropy.coordinates.CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"rho": u.Quantity, "phi": Angle, "z": u.Quantity}
def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1.0, self.shape)
return {
"rho": CartesianRepresentation(cosphi, sinphi, 0, copy=False),
"phi": CartesianRepresentation(-sinphi, cosphi, 0, copy=False),
"z": CartesianRepresentation(0, 0, l, unit=u.one, copy=False),
}
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"rho": l, "phi": rho, "z": l}
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
def _scale_operation(self, op, *args):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
phi_op, _, rho_op = _spherical_op_funcs(op, *args)
z_op = lambda x: op(x, *args)
result = self.__class__(
rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=False
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(rho_op, operator.pos, z_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
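# Hedged usage sketch (values assumed, not part of this module): converting a
# cylindrical point to Cartesian and back with the to_cartesian/from_cartesian
# methods defined above.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import CylindricalRepresentation
#     >>> cyl = CylindricalRepresentation(rho=1 * u.kpc, phi=90 * u.deg, z=2 * u.kpc)
#     >>> cart = cyl.to_cartesian()          # x ~ 0, y = 1 kpc, z = 2 kpc
#     >>> CylindricalRepresentation.from_cartesian(cart)  # doctest: +SKIP
#     <CylindricalRepresentation (rho, phi, z) ...>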
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity` ['speed']
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity` ['angular speed']
The differential azimuth.
d_z : `~astropy.units.Quantity` ['speed']
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = CylindricalRepresentation
def __init__(self, d_rho, d_phi=None, d_z=None, copy=False):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
84fade38bf0c9d2440261773ac1dc61b6b7371faf1366be7a70d591ae2ead675 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import errno
import gzip
import http.client
import io
import mmap
import operator
import os
import re
import sys
import tempfile
import warnings
import zipfile
from functools import reduce
import numpy as np
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import (
_is_url,
_requires_fsspec,
download_file,
get_readable_fileobj,
)
from astropy.utils.decorators import classproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .util import (
_array_from_file,
_array_to_file,
_write_string,
fileobj_closed,
fileobj_mode,
fileobj_name,
isfile,
isreadable,
iswritable,
path_like,
)
if HAS_BZ2:
import bz2
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files.
IO_FITS_MODES = {
"readonly": "rb",
"copyonwrite": "rb",
"update": "rb+",
"append": "ab+",
"ostream": "wb",
"denywrite": "rb",
}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
"rb": "readonly",
"rb+": "update",
"wb": "ostream",
"wb+": "update",
"ab": "ostream",
"ab+": "append",
}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r"^[rwa]((t?\+?)|(\+?t?))$")
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array. To
# get a truly readonly mmap, use denywrite.
# The name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {
"readonly": mmap.ACCESS_COPY,
"copyonwrite": mmap.ACCESS_COPY,
"update": mmap.ACCESS_WRITE,
"append": mmap.ACCESS_COPY,
"denywrite": mmap.ACCESS_READ,
}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b"\x1f\x8b\x08"
PKZIP_MAGIC = b"\x50\x4b\x03\x04"
BZIP2_MAGIC = b"\x42\x5a"
def _is_bz2file(fileobj):
if HAS_BZ2:
return isinstance(fileobj, bz2.BZ2File)
else:
return False
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
f"Text mode '{mode}' not supported: files must be opened in binary mode"
)
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
mode = new_mode
return mode
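# Illustrative sketch of the mapping above (not an exhaustive test): an OS-level
# mode is translated to the astropy.io.fits mode name, while text modes are
# rejected outright.
#
#     >>> _normalize_fits_mode("rb+")
#     'update'
#     >>> _normalize_fits_mode("readonly")   # already a FITS mode, passed through
#     'readonly'
#     >>> _normalize_fits_mode("r")          # text mode -> ValueError
#     Traceback (most recent call last):
#     ...
#     ValueError: Text mode 'r' not supported: files must be opened in binary mode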
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(
self,
fileobj=None,
mode=None,
memmap=None,
overwrite=False,
cache=True,
*,
use_fsspec=None,
fsspec_kwargs=None,
):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode)
)
mode = objmode
if mode is None:
mode = "readonly"
# Handle cloud-hosted files using the optional ``fsspec`` dependency
if (use_fsspec or _requires_fsspec(fileobj)) and mode != "ostream":
# Note: we don't use `get_readable_fileobj` as a context manager
# because io.fits takes care of closing files itself
fileobj = get_readable_fileobj(
fileobj,
encoding="binary",
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
close_files=False,
).__enter__()
# Handle raw URLs
if (
isinstance(fileobj, (str, bytes))
and mode not in ("ostream", "append", "update")
and _is_url(fileobj)
):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ("ostream", "append", "update"):
raise ValueError(f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
self.name = fileobj_name(fileobj)
self.mode = mode
# Underlying fileobj is a file-like object, but an actual file object
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = "gzip"
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = "zip"
elif _is_bz2file(fileobj):
self.compression = "bzip2"
if mode in ("readonly", "copyonwrite", "denywrite") or (
self.compression and mode == "update"
):
self.readonly = True
elif mode == "ostream" or (self.compression and mode == "append"):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if mode == "ostream" or self.compression or not hasattr(self._file, "seek"):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f"<{self.__module__}.{self.__class__.__name__} {self._file}>"
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, "read"):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == "gzip":
return ""
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, "read"):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f"size {size} not a multiple of {dtype}")
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn(
"No size or shape given to readarray(); assuming a shape of (1,)",
AstropyUserWarning,
)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError(
f"size {size} is too few bytes for a {shape} array of {dtype}"
)
elif actualsize < size:
raise ValueError(
f"size {size} is too many bytes for a {shape} array of {dtype}"
)
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
# of CPython or perhaps in Microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(
self._file.fileno(), 0, access=access_mode, offset=0
)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if exc.errno == errno.ENOMEM and self.mode == "readonly":
warnings.warn(
"Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning,
)
self._mmap = mmap.mmap(
self._file.fileno(),
0,
access=MEMMAP_MODES["denywrite"],
offset=0,
)
else:
raise
return np.ndarray(
shape=shape, dtype=dtype, offset=offset, buffer=self._mmap
)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer.
# Also for Windows when using mmap seek() may return weird
# negative values, which is fixed by calling tell() before.
self._file.tell()
self._file.seek(filepos)
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, "write"):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, "write"):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, "flush"):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, "seek"):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn(
"File may have been truncated: actual file length "
"({}) is smaller than the expected size ({})".format(self.size, pos),
AstropyUserWarning,
)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, "tell"):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, "truncate"):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, "close"):
self._file.close()
self._maybe_close_mmap()
# Set self._mmap to None anyway since no new .data attributes can be
# loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
# sys.getrefcount is CPython specific and not on PyPy.
if (
self._mmap is not None
and hasattr(sys, "getrefcount")
and sys.getrefcount(self._mmap) == 2 + refcount_delta
):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if (self.file_like and hasattr(fileobj, "len") and fileobj.len > 0) or (
os.path.exists(self.name) and os.path.getsize(self.name) != 0
):
if overwrite:
if self.file_like and hasattr(fileobj, "truncate"):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=""):
"""Attempt to determine if the given file is compressed."""
is_ostream = mode == "ostream"
if (is_ostream and ext == ".gz") or magic.startswith(GZIP_MAGIC):
if mode == "append":
raise OSError(
"'append' mode is not supported with gzip files."
"Use 'update' mode instead"
)
# Handle gzip files
kwargs = {"mode": IO_FITS_MODES[mode]}
if isinstance(obj_or_name, str):
kwargs["filename"] = obj_or_name
else:
kwargs["fileobj"] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = "gzip"
elif (is_ostream and ext == ".zip") or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = "zip"
elif (is_ostream and ext == ".bz2") or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ["update", "append"]:
raise OSError(
"update and append modes are not supported with bzip2 files"
)
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = "w" if is_ostream else "r"
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = "bzip2"
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ["ostream", "append"]:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError(
f"Cannot read from/write to a closed file-like object ({fileobj!r})."
)
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
# If there are no seek or tell methods then set the mode to
# output streaming.
if not hasattr(self._file, "seek") or not hasattr(self._file, "tell"):
self.mode = mode = "ostream"
if mode == "ostream":
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if self.mode in ("update", "append", "ostream") and not hasattr(
self._file, "write"
):
raise OSError(
"File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode)
)
# Any mode except for 'ostream' requires readability
if self.mode != "ostream" and not hasattr(self._file, "read"):
raise OSError(
"File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode)
)
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == "ostream":
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with open(self.name, "rb") as f:
magic = f.read(4)
else:
magic = b""
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (_is_bz2file(self._file) and mode == "ostream"):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
not be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b" ")
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn(
f"Failed to create mmap: {exc}; mmap use will be disabled",
AstropyUserWarning,
)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn(
"mmap.flush is unavailable on this platform; "
"using mmap in writeable mode will be disabled",
AstropyUserWarning,
)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ("update", "append"):
raise OSError("Writing to zipped fits files is not currently supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError("Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix=".fits")
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
|
141d67d41e4b8170acf87aa97ba72227c533ebb612c7400986d0789bfa7e355d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from copy import deepcopy
import numpy as np
from astropy import units as u
from astropy.io import registry as io_registry
from astropy.table import Column, MaskedColumn, Table, meta, serialize
from astropy.time import Time
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import BinTableHDU, GroupsHDU, HDUList, TableHDU
from . import append as fits_append
from .column import KEYWORD_NAMES, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import FITS_SIGNATURE
from .hdu.hdulist import fitsopen as fits_open
from .util import first
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = [
"XTENSION",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"PCOUNT",
"GCOUNT",
"TFIELDS",
"THEAP",
]
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = "(" + "|".join(KEYWORD_NAMES) + ")[0-9]+"
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
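# Quick illustration of the regexp above (keyword names come from
# astropy.io.fits.column.KEYWORD_NAMES): per-column keywords end in a digit.
#
#     >>> is_column_keyword("TUNIT2")
#     True
#     >>> is_column_keyword("NAXIS1")
#     False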
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
return filepath.lower().endswith(
(".fits", ".fits.gz", ".fit", ".fit.gz", ".fts", ".fts.gz")
)
return isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU))
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta["comments"].index("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
i1 = tbl.meta["comments"].index("--END-ASTROPY-SERIALIZED-COLUMNS--")
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
continuation_line = False
lines = []
for line in tbl.meta["comments"][i0 + 1 : i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta["comments"][i0 : i1 + 1]
if not tbl.meta["comments"]:
del tbl.meta["comments"]
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta["__serialized_columns__"] = info["meta"]["__serialized_columns__"]
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info["datatype"]:
for attr in ["description", "meta"]:
if attr in col:
setattr(tbl[col["name"]].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
def read_table_fits(
input,
hdu=None,
astropy_native=False,
memmap=False,
character_as_bytes=True,
unit_parse_strict="warn",
mask_invalid=True,
):
"""
Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
When set to `True`, ``mask_invalid`` is set to `False`, since masking
would require loading the full data array.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
unit_parse_strict : str, optional
Behaviour when encountering invalid column units in the FITS header.
Default is "warn", which will emit a ``UnitsWarning`` and create a
:class:`~astropy.units.core.UnrecognizedUnit`.
Values are the ones allowed by the ``parse_strict`` argument of
:class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``.
mask_invalid : bool, optional
By default the code masks NaNs in float columns and empty strings in
string columns. Set this parameter to `False` to avoid the performance
penalty of doing this masking step. The masking is always deactivated
when using ``memmap=True`` (see above).
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = {}
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn(
"hdu= was not specified but multiple tables"
" are present, reading in first available"
f" table (hdu={first(tables)})",
AstropyUserWarning,
)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError(f"No table found in hdu={hdu}")
elif len(tables) == 1:
if hdu is not None:
msg = None
try:
hdi = input.index_of(hdu)
except KeyError:
msg = f"Specified hdu={hdu} not found"
else:
if hdi >= len(input):
msg = f"Specified hdu={hdu} not found"
elif hdi not in tables:
msg = f"No table found in specified hdu={hdu}"
if msg is not None:
warnings.warn(
f"{msg}, reading in first available table "
f"(hdu={first(tables)}) instead. This will"
" result in an error in future versions!",
AstropyDeprecationWarning,
)
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
if memmap:
# using memmap is not compatible with masking invalid value by
# default so we deactivate the masking
mask_invalid = False
hdulist = fits_open(input, character_as_bytes=character_as_bytes, memmap=memmap)
try:
return read_table_fits(
hdulist,
hdu=hdu,
astropy_native=astropy_native,
unit_parse_strict=unit_parse_strict,
mask_invalid=mask_invalid,
)
finally:
hdulist.close()
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
# Check if column is masked. Here, we make a guess based on the
# presence of FITS mask values. For integer columns, this is simply
# the null header, for float and complex, the presence of NaN, and for
# string, empty strings.
# Since multi-element columns with dtypes such as '2f8' have a subdtype,
# we should look up the type of the column on that.
masked = mask = False
coltype = col.dtype.subdtype[0].type if col.dtype.subdtype else col.dtype.type
if col.null is not None:
mask = data[col.name] == col.null
# Return a MaskedColumn even if no elements are masked so
# we roundtrip better.
masked = True
elif mask_invalid and issubclass(coltype, np.inexact):
mask = np.isnan(data[col.name])
elif mask_invalid and issubclass(coltype, np.character):
mask = col.array == b""
if masked or np.any(mask):
column = MaskedColumn(
data=data[col.name], name=col.name, mask=mask, copy=False
)
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(
col.unit, format="fits", parse_strict=unit_parse_strict
)
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ["COMMENT", "HISTORY"]:
# Convert to io.ascii format
if key == "COMMENT":
key = "comments"
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(
any(
getattr(col.info, attr, None) not in (None, {})
for attr in ("description", "meta")
)
for col in tbl.itercols()
)
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
# Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as("fits"):
encode_tbl = serialize.represent_mixins_as_columns(tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Copy the meta dict if it was not copied by represent_mixins_as_columns.
# We will modify .meta['comments'] below and we do not want to see these
# comments in the input table.
if encode_tbl is tbl:
meta_copy = deepcopy(tbl.meta)
encode_tbl = Table(tbl.columns, meta=meta_copy, copy=False)
# Get the YAML serialization of information describing the table columns.
# This is re-using ECSV code that combines existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = "__serialized_columns__"
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if "comments" not in encode_tbl.meta:
encode_tbl.meta["comments"] = []
encode_tbl.meta["comments"].append("--BEGIN-ASTROPY-SERIALIZED-COLUMNS--")
for line in meta_yaml_lines:
if len(line) == 0:
lines = [""]
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
lines = [line[i0:i1] + "\\" for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta["comments"].extend(lines)
encode_tbl.meta["comments"].append("--END-ASTROPY-SERIALIZED-COLUMNS--")
return encode_tbl
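# Worked example of the COMMENT-card chunking above (length assumed for
# illustration): a 150-character YAML line gives idxs = [0, 70, 140, 210], i.e.
# chunks of 70, 70 and 10 characters; the first two keep a trailing backslash
# and the last one does not, which is exactly what _decode_mixins checks for
# with ``continuation_line = len(line) == 71``.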
def write_table_fits(input, output, overwrite=False, append=False):
"""
Write a Table object to a FITS file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
append : bool
Whether to append the table to an existing file
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
elif not append:
raise OSError(NOT_OVERWRITING_MSG.format(output))
if append:
# verify=False stops it reading and checking the existing file.
fits_append(output, table_hdu.data, table_hdu.header, verify=False)
else:
table_hdu.writeto(output)
io_registry.register_reader("fits", Table, read_table_fits)
io_registry.register_writer("fits", Table, write_table_fits)
io_registry.register_identifier("fits", Table, is_fits)
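# Hedged usage sketch (the file name is hypothetical): with the reader, writer
# and identifier registered above, round-tripping a Table through FITS is just:
#
#     >>> from astropy.table import Table
#     >>> t = Table({"a": [1, 2, 3]})
#     >>> t.write("example.fits", overwrite=True)   # doctest: +SKIP
#     >>> t2 = Table.read("example.fits", hdu=1)    # doctest: +SKIP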
|
2c3ad5b8dd4a44dafbb15486ceb1d193352cb1c2a03f7ffc6a6e9d0950ac7b5b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import numbers
import operator
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import indent, isiterable, lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .card import CARD_LENGTH, Card
from .util import NotifierMixin, _convert_array, _is_int, cmp, encode_ascii, pairwise
from .verify import VerifyError, VerifyWarning
__all__ = ["Column", "ColDefs", "Delayed"]
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {
"L": "i1",
"B": "u1",
"I": "i2",
"J": "i4",
"K": "i8",
"E": "f4",
"D": "f8",
"C": "c8",
"M": "c16",
"A": "a",
}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS["b1"] = "L"
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS["u2"] = "I"
NUMPY2FITS["u4"] = "J"
NUMPY2FITS["u8"] = "K"
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS["f2"] = "E"
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ["L", "B", "I", "J", "K", "D", "M", "A"]
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {"E": "D", "C": "M"}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {"A": "a", "I": "i4", "J": "i8", "F": "f8", "E": "f8", "D": "f8"}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {"A": "", "I": "d", "J": "d", "F": "f", "E": "E", "D": "E"}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {
"A": (1, 0),
"I": (10, 0),
"J": (15, 0),
"E": (15, 7),
"F": (16, 7),
"D": (25, 17),
}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT["F"] = re.compile(
r"(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)|"
)
TDISP_RE_DICT["A"] = TDISP_RE_DICT["L"] = re.compile(
r"(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|"
)
TDISP_RE_DICT["I"] = TDISP_RE_DICT["B"] = TDISP_RE_DICT["O"] = TDISP_RE_DICT[
"Z"
] = re.compile(
r"(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)"
r"(?:\.{0,1}(?P<precision>[0-9]+))?))|"
)
TDISP_RE_DICT["E"] = TDISP_RE_DICT["G"] = TDISP_RE_DICT["D"] = re.compile(
r"(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\."
r"(?P<precision>[0-9]+))+)"
r"(?:E{0,1}(?P<exponential>[0-9]+)?)|"
)
TDISP_RE_DICT["EN"] = TDISP_RE_DICT["ES"] = re.compile(
r"(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)"
)
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering Fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but with a non-zero leading digit)
# E: Float, exponential notation
# Can't get exponential restriction to work without knowing value
# beforehand, so just using width and precision, same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
"I": "{{:{width}d}}",
"B": "{{:{width}b}}",
"O": "{{:{width}o}}",
"Z": "{{:{width}x}}",
"F": "{{:{width}.{precision}f}}",
"G": "{{:{width}.{precision}g}}",
}
TDISP_FMT_DICT["A"] = TDISP_FMT_DICT["L"] = "{{:>{width}}}"
TDISP_FMT_DICT["E"] = TDISP_FMT_DICT["D"] = TDISP_FMT_DICT["EN"] = TDISP_FMT_DICT[
"ES"
] = "{{:{width}.{precision}e}}"
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = (
"TTYPE",
"TFORM",
"TUNIT",
"TNULL",
"TSCAL",
"TZERO",
"TDISP",
"TBCOL",
"TDIM",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
)
KEYWORD_ATTRIBUTES = (
"name",
"format",
"unit",
"null",
"bscale",
"bzero",
"disp",
"start",
"dim",
"coord_type",
"coord_unit",
"coord_ref_point",
"coord_ref_value",
"coord_inc",
"time_ref_pos",
)
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(
r"(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])(?P<option>[!-~]*)", re.I
)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(
r"(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|"
r"(?:(?P<formatf>[FED])"
r"(?:(?P<widthf>[0-9]+)(?:\."
r"(?P<precision>[0-9]+))?)?)"
)
TTYPE_RE = re.compile(r"[0-9a-zA-Z_]+")
"""
Regular expression for valid table column names. See FITS Standard v3.0 section 7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r"(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)")
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r"\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*")
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = "---"
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can effectively
represent any integer that will fit in a FITS column, whereas for
binary tables 'I' refers explicitly to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible; an unsupported conversion results in a `ValueError`.
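For example (illustrative; the binary dtype comes from this module's
FITS-to-Numpy mapping)::
>>> _ColumnFormat('I').dtype
dtype('int16')
>>> _AsciiColumnFormat('I10').width
10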
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = _parse_ascii_tformat(format, strict)
# If no width has been specified, fall back to the default recformat (and dtype)
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == "L":
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ("E", "F", "D"):
return f"{self.format}{self.width}.{self.precision}"
return f"{self.format}{self.width}"
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + "u1")
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f"{self.repeat}X"
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
# made more flexible in the future, for example, to support custom
# column attributes.
self._attr = "_" + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify("column_attribute_changed", obj, self._attr[1:], old_value, value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
self,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
array=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Construct a `Column` by specifying attributes. All attributes
except ``format`` are optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
the ``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of the column as if the
scaling had already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
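Examples
--------
A minimal binary table column (the name and values here are purely
illustrative):
>>> import numpy as np
>>> col = Column(name='FLUX', format='E', array=np.array([1.0, 2.5], dtype='float32'))
>>> col.name, col.format
('FLUX', 'E')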
"""
if format is None:
raise ValueError("Must specify format to construct Column.")
# any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {"ascii": ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ["The following keyword arguments to Column were invalid:"]
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError("\n".join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs["recformat"]
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
# If the column data is not an ndarray, convert it to one (i.e. input
# arrays may be a plain list or tuple rather than an ndarray). Object
# arrays are excluded because there is no guarantee that the elements
# of an object array are consistent.
if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError(
f"Data is inconsistent with the format `{format}`."
)
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ""
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + " = " + repr(value) + "; "
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
# This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
# Because ndarray objects are not handled by Python's garbage
# collector, the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
# the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
# If the Column's array is not a reference to an existing FITS_rec,
# then it is just stored in self.__dict__; otherwise check the
# _parent_fits_rec reference, if it is still available.
if "array" in self.__dict__:
return self.__dict__["array"]
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if "array" in self.__dict__:
del self.__dict__["array"]
return
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but are discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
"(got {!r}).".format(name),
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
raise AssertionError(
"Coordinate/axis type must be a string of atmost 8 characters."
)
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]:
if v is not None and v != "":
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null)
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
"binary table columns (got {!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column.".format(null)
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
"table columns of type {!r} (got {!r}). The invalid "
"value will be ignored for the purpose of formatting "
"the data in this column.".format(format, null)
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
"table columns (got {!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column.".format(start)
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
"(got {!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column.".format(start)
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
# ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = ()
# NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
"columns (got {!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column.".format(dim)
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if isinstance(recformat, _FormatP):
# TDIMs have different meaning for VLA format,
# no warning should be thrown
msg = None
elif reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
"The repeat count of the column format {!r} for column {!r} "
"is fewer than the number of elements per the TDIM "
"argument {!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column.".format(
name, format, dim
)
)
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type)
)
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type)
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit)
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got"
" {!r}). The invalid value will be ignored for the purpose of"
" formatting the data in this column.".format(
k, ATTRIBUTE_TO_KEYWORD[k], v
)
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos)
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but leans toward a binary
table where ambiguous), create an appropriate _BaseColumnFormat
instance for the column's format, and determine the appropriate
recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
binary tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
# We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
"Columns cannot have both a start (TCOLn) and dim "
"(TDIMn) option, since the former is only applies to "
"ASCII tables, and the latter is only valid for binary tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
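# For example (illustrative cases), 'I10' parses strictly as an ASCII
# format (integer, width 10) and is treated as an ASCII column, whereas
# a bare 'J' fails the strict ASCII parse and falls through to the
# binary-table guess.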
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
# just 'F', which is not a valid binary format, but is an ASCII format
# code, albeit with the width/precision omitted)
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims and format.format not in "PQ":
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
# to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
# Basically, if the array is uint, has scale == 1.0, and bzero is
# the appropriate value for a pseudo-unsigned integer of the input
# dtype, then go ahead and treat the column as pseudo-unsigned
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = "\x00"
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if hasattr(input, "_columns_type") and issubclass(input._columns_type, ColDefs):
klass = input._columns_type
elif hasattr(input, "_col_format_cls") and issubclass(
input._col_format_cls, _AsciiColumnFormat
):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .fitsrec import FITS_rec
from .hdu.table import _TableBaseHDU
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (
isinstance(input, FITS_rec)
and hasattr(input, "_coldefs")
and input._coldefs
):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError(
"Input to ColDefs must be a table HDU, a list "
"of Columns, or a record/field array."
)
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f"Element {idx} in the ColDefs input is not a Column.")
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
if ftype.kind == "O":
dtypes = {np.array(array[cname][i]).dtype for i in range(len(array))}
if (len(dtypes) > 1) or (np.dtype("O") in dtypes):
raise TypeError(
f"Column '{cname}' contains unsupported object types or "
f"mixed types: {dtypes}"
)
ftype = dtypes.pop()
format = self._col_format_cls.from_recformat(ftype)
format = f"P{format}()"
else:
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or "A" in format):
if "A" in format:
# should take into account multidimensional items in the column
dimel = int(re.findall("[0-9]+", str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = "(" + ",".join(str(d) for d in dim) + ")"
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == "u":
if "I" in format:
bzero = np.uint16(2**15)
elif "J" in format:
bzero = np.uint32(2**31)
elif "K" in format:
bzero = np.uint64(2**63)
c = Column(
name=cname,
format=format,
array=array.view(np.ndarray)[cname],
bzero=bzero,
dim=dim,
)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr["TFIELDS"]
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group("label")
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group("num"))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == "format":
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f"Invalid keyword for column {idx + 1}: {val[1]}", VerifyWarning
)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs["recformat"]
if "dim" in valid_kwargs:
valid_kwargs["dim"] = kwargs["dim"]
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]["array"] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
# Add the table HDU as a listener for changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
# compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if new_column.disp is not None and new_column.disp.upper().startswith("L"):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == "s":
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else "")
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim and format_.format not in "PQ":
# Note: VLA array descriptors should not be reshaped
# as they are always of shape (2,)
if format_.format == "A":
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({"names": self.names, "formats": formats, "offsets": offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = "ColDefs("
if hasattr(self, "columns") and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += "\n "
rep += "\n ".join([repr(c) for c in self.columns])
rep += "\n"
rep += ")"
return rep
def __add__(self, other, option="left"):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError("Wrong type of input.")
if option == "left":
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, "right")
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value, new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == "name":
del self.names
elif attr == "format":
del self.formats
self._notify(
"column_attribute_changed", column, idx, attr, old_value, new_value
)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify("column_added", self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
Parameters
----------
col_name : str or int
The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify("column_removed", self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f"New name {new_name} already exists.")
else:
self.change_attrib(col_name, "name", new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, "unit", new_unit)
def info(self, attrib="all", output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ["all", ""]:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(",")
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == "s":
lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write(
f"'{attr}' is not an attribute of the column definitions.\n"
)
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + "s")
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["a" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype="a"):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == "a":
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(f"Inconsistent input data array: {input}")
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
Ensure the new item has a consistent data type, to avoid
misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == "a":
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
nelem = value.shape
len_value = np.prod(nelem)
self.max = max(self.max, len_value)
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
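For example (illustrative)::
>>> _get_index(['abc', 'ABC'], 'ABC')
1
>>> _get_index(['XYZ'], 'xyz')
0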
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
# try to match case-insensitively,
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "a":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "a":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "a":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
nelem = data_output[idx].shape
descr_output[idx, 0] = np.prod(nelem)
descr_output[idx, 1] = _offset
_offset += descr_output[idx, 0] * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
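For example (illustrative)::
>>> _parse_tformat('10A')
(10, 'A', '')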
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
# TODO: Maybe catch this error and use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow...
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
f"Format {tform!r} not valid--field width must be a positive integeter."
)
if precision >= width:
raise VerifyError(
f"Format {tform!r} not valid--the number of decimal digits "
f"must be less than the format's total width {width}."
)
return format, width, precision
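# Hedged examples of _parse_ascii_tformat: floating-point codes carry a
# precision, while integer and character codes do not:
#
#     >>> _parse_ascii_tformat('E12.5')
#     ('E', 12, 5)
#     >>> _parse_ascii_tformat('I5')
#     ('I', 5, 0)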
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
    the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return ()
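# Hedged example of _parse_tdim: the TDIM string is given in Fortran
# (fastest-varying-first) order, so the returned tuple is reversed into C
# order; anything that is not a valid TDIM value yields an empty tuple:
#
#     >>> _parse_tdim('(2,3)')
#     (3, 2)
#     >>> _parse_tdim(None)
#     ()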
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "a" and f2[0] == "a":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
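# Hedged examples of _convert_fits2record mapping FITS TFORM codes to numpy
# record format strings:
#
#     >>> _convert_fits2record('10A')
#     'a10'
#     >>> _convert_fits2record('2E')
#     '2f4'
#     >>> _convert_fits2record('J')
#     'i4'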
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
# Use dtype.base and dtype.subdtype --dtype for multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "a":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
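# Hedged examples of the reverse mapping performed by _convert_record2fits:
#
#     >>> _convert_record2fits('f8')
#     'D'
#     >>> _convert_record2fits(np.dtype('S10'))
#     '10A'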
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "a"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
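# Hedged examples of _dtype_to_recformat; string dtypes collapse to the
# legacy 'a' code:
#
#     >>> _dtype_to_recformat('float32')[:2]
#     ('f4', 'f')
#     >>> _dtype_to_recformat(np.dtype('S5'))[:2]
#     ('a', 'a')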
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "a":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
        # For integers, if the width <= 4 we can safely use 16-bit ints for
        # all values; if width >= 10 we may need to accommodate 64-bit values
        # (for the non-standard J format code, just always force 64-bit).
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat
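# Hedged examples of the forward (FITS ASCII -> record) direction of
# _convert_ascii_format:
#
#     >>> _convert_ascii_format('I4')
#     'i2'
#     >>> _convert_ascii_format('I12')
#     'i8'
#     >>> _convert_ascii_format('A10')
#     'a10'
#     >>> _convert_ascii_format('E12.5')
#     'f8'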
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
    formatc : str
        The format characters from TDISPn
    width : str
        The width int value from TDISPn
    precision : str
        The precision int value from TDISPn
    exponential : str
        The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
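# Hedged example of _parse_tdisp_format; width and precision come back as the
# raw strings captured by the regular expression:
#
#     >>> _parse_tdisp_format('F8.3')
#     ('F', '8', '3', None)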
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
    format_string : str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.")
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn a Python format string into a TDISP FITS-compliant format string. Not
    all formats convert; those that do not will cause a warning and return None.
Parameters
----------
format_string : str
        The Python format string to be translated into a TDISPn keyword.
logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string : str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (ASCII_DEFAULT_WIDTHS[key][0] - ASCII_DEFAULT_WIDTHS[key][1])
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
f"Format {format_string} cannot be mapped to the accepted TDISPn "
"keyword values. Format will not be moved into TDISPn keyword.",
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
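# Hedged examples of python_to_tdisp, the inverse translation:
#
#     >>> python_to_tdisp('{:8.3f}')
#     'F8.3'
#     >>> python_to_tdisp('%10.2e')
#     'E10.2'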
|
b46e03b7f20deef0de4d4ccff573d7eae78943d818ae8d77242410f32ea1755b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import lazyproperty
from .column import (
_VLF,
ASCII2NUMPY,
ASCII2STR,
ASCIITNULL,
FITS2NUMPY,
ColDefs,
Delayed,
_AsciiColDefs,
_FormatP,
_FormatX,
_get_index,
_makep,
_unwrapx,
_wrapx,
)
from .util import _rstrip_inplace, decode_ascii, encode_ascii
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(
self, input, row=0, start=None, end=None, step=None, base=None, **kwargs
):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop, key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
            for indx in range(*key.indices(len(self))):
                indx = self._get_index(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return f"({', '.join(outlist)})"
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[: self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
    `FITS_rec` is the data part of a table HDU. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
_character_as_bytes = False
_load_variable_length_data = True
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data
)
else:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data, strides=input.strides
)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in [
"_converted",
"_heapoffset",
"_heapsize",
"_nfields",
"_gap",
"_uint",
"parnames",
"_coldefs",
]:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
                # pickled, it needs to be deepcopied first
if attrs == "_coldefs":
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, "_heapoffset", 0)
self._heapsize = getattr(obj, "_heapsize", 0)
self._gap = getattr(obj, "_gap", 0)
self._uint = getattr(obj, "_uint", False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If
`False`, copy the data from input, undefined cells will still
be filled with zeros/blanks.
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data, arr.field)
# Reset columns._arrays (which we may want to just do away with
        # altogether)
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
            # given in the call to this function, whichever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord("F")
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
elif columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints:
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
                # Regardless of whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ("S", "U"):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (
inarr.dtype.kind == outarr.dtype.kind
and inarr.dtype.kind in ("U", "S")
and inarr.dtype != outarr.dtype
):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
                    # strings)
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
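    # A hedged usage sketch (added for illustration; not part of the original
    # source).  Column objects come from the sibling .column module (exposed
    # publicly as astropy.io.fits.Column); the values shown are illustrative:
    #
    #     >>> from astropy.io import fits
    #     >>> cols = [fits.Column(name='a', format='J', array=np.arange(3)),
    #     ...         fits.Column(name='b', format='E', array=np.ones(3))]
    #     >>> rec = FITS_rec.from_columns(cols)
    #     >>> len(rec), rec.columns.names
    #     (3, ['a', 'b'])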
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getattribute__(self, attr):
# First, see if ndarray has this attr, and return it if so. Note that
# this means a field with the same name as an ndarray attr cannot be
# accessed by attribute, this is Numpy's default behavior.
# We avoid using np.recarray.__getattribute__ here because after doing
# this check it would access the columns without doing the conversions
# that we need (with .field, see below).
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# attr might still be a fieldname. If we have column definitions,
# we should access this via .field, as the data may have to be scaled.
if self._coldefs is not None and attr in self.columns.names:
return self.field(attr)
# If not, just let the usual np.recarray override deal with it.
return super().__getattribute__(attr)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._uint = self._uint
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError(
f"Input tuple or list required to have {self._nfields} elements."
)
else:
raise TypeError(
"Assignment requires a FITS_record, tuple, or list as input."
)
def _ipython_key_completions_(self):
return self.names
def copy(self, order="C"):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""A user-visible accessor for the coldefs."""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__["_coldefs"] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__["_coldefs"]
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, "_coldefs", None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, "_coldefs", None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == "U":
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
def field(self, key):
"""
A view of a `Column`'s data as an array.
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
"Field {!r} has a repeat count of 0 in its format code, "
"indicating an empty field.".format(key)
)
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP) and self._load_variable_length_data:
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
This results in a reference cycle that cannot be broken since
ndarrays do not participate in cyclic garbage collection.
"""
base = field
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, "base", None) is not None:
self_base = self_base.base
else:
break
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = f"_update_column_{attr}"
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
if column.dim:
vla_shape = tuple(
reversed(tuple(map(int, column.dim.strip("()").split(","))))
)
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name)
)
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == "a":
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset : offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset : offset + arr_len].view(dt)
if column.dim and len(vla_shape) > 1:
# The VLA is reshaped consistently with TDIM instructions
if vla_shape[0] == 1:
dummy[idx] = dummy[idx].reshape(1, len(dummy[idx]))
else:
vla_dim = vla_shape[1:]
vla_first = int(len(dummy[idx]) / np.prod(vla_dim))
dummy[idx] = dummy[idx].reshape((vla_first,) + vla_dim)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder(">")
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx], recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode("ascii")
if len(nullval) > format.width:
nullval = nullval[: format.width]
# Before using .replace make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; this causes
# functions like replace to potentially leave gibberish bytes in the
# array buffer.
dummy = np.char.ljust(field, format.width)
dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be converted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b"":
dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
"{}; the header may be missing the necessary TNULL{} "
"keyword or the table contains invalid data".format(exc, indx + 1)
)
return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif len(field.shape) == 1:
# No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems and not isinstance(recformat, _FormatP):
warnings.warn(
"TDIM{} value {:d} does not fit with the size of "
"the array items ({:d}). TDIM{:d} will be ignored.".format(
indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1
)
)
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if _number and (_scale or _zero) and not column._physical_values:
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == "I":
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == "J":
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == "K":
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2**63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == "K":
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{:d}. "
"Returning unscaled data.".format(indx + 1)
)
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord("T"))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim and not isinstance(recformat, _FormatP):
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
        Returns the base array of self, the "raw data array": the array in
        the format in which it was first read from a file, before it was
        sliced or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
npts = [np.prod(arr.shape) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
if type(recformat) == _FormatP and heapsize >= 2**31:
raise ValueError(
"The heapsize limit for 'P' format has been reached. "
"Please consider using the 'Q' format for your file."
)
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = scale_factors
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
                    # be marked as not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {!r} of the column, and the index {} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start,
)
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The the index of the "end" column of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
f"Column {col_idx + 1} starting point overlaps the previous column."
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of {}.".format(
value, spans[col_idx]
)
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
    If any strings are non-ASCII, a _UnicodeArrayEncodeError is raised--this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
|
7cc2c60800d837045fd0f950282b6bb419166e83ac67b524f47cfd5d84177a8b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
import itertools
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
import numpy as np
from packaging.version import Version
from astropy.utils import data
from astropy.utils.exceptions import AstropyUserWarning
path_like = (str, bytes, os.PathLike)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f"_update_{notification}"
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving this object's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state["_listeners"] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Examples
--------
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
yield from itersubclasses(sub, _seen)
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
f"KeyboardInterrupt ignored until {func.__name__} is complete!",
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
# Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
if sys.version_info[:2] >= (3, 10):
from itertools import pairwise
else:
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIteration if b happens to be empty
break
return zip(a, b)
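# Illustrative behavior of ``pairwise`` (both the itertools version and the
# fallback above): list(pairwise("ABCD")) gives [("A", "B"), ("B", "C"),
# ("C", "D")], while an input with fewer than two items yields nothing.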
def encode_ascii(s):
if isinstance(s, str):
return s.encode("ascii")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_):
ns = np.char.encode(s, "ascii").view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_):
raise TypeError("string operation on non-string array")
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode("ascii")
except UnicodeDecodeError:
warnings.warn(
"non-ASCII characters are present in the FITS "
'file header and have been replaced by "?" characters',
AstropyUserWarning,
)
s = s.decode("ascii", errors="replace")
return s.replace("\ufffd", "?")
elif isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
# array : https://github.com/numpy/numpy/issues/13156
dt = s.dtype.str.replace("S", "U")
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, "ascii").view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError("string operation on non-string array")
return s
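# Illustrative behavior of the two helpers above (a sketch, not part of the
# module's API): encode_ascii(np.array(["ab"], dtype="U2")) should return an
# "S2" bytes array, and decode_ascii of an "S" array returns the matching "U"
# array. Passing non-ASCII text to encode_ascii raises UnicodeEncodeError,
# while decode_ascii of a plain bytes value replaces undecodable bytes with
# "?" and warns.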
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
# Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False
def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode
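# Illustrative behavior: a mode string such as "r+b" is normalized to "rb+",
# and gzip.GzipFile objects (whose ``mode`` is the numeric gzip.READ or
# gzip.WRITE constant) are reported as "rb" or "wb" respectively.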
def fileobj_is_binary(f):
"""
Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(line) < width for line in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
# a copy is needed because the array constructed over the bytes buffer is a
# read-only view of the underlying data
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2**32) - 1
_WIN_WRITE_LIMIT = (2**31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
# it does not have the buffer interface, a TypeError should be raised
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and the two dtypes are not both numeric, a view
is returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype)
def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1)
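# Illustrative values: _pseudo_zero(np.dtype("uint16")) is 32768 (half the
# unsigned range) and _pseudo_zero(np.dtype("int8")) is -128.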
def _is_pseudo_integer(dtype):
return (dtype.kind == "u" and dtype.itemsize >= 2) or (
dtype.kind == "i" and dtype.itemsize == 1
)
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(s, width):
"""
Split a long string into parts where each part is no longer than ``width``
and no word is cut into two pieces. But if there are any single words
which are longer than ``width``, then they will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words
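# Illustrative behavior (a sketch): _words_group("hello world foo", 8) should
# give ["hello ", "world ", "foo"]--each piece is at most 8 characters and
# breaks fall on blanks where possible, while a single word longer than the
# width is split mid-word.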
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the directory of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ""
if not isinstance(hdulist, list):
hdulist = [hdulist]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = (
"Not enough space on disk: requested {}, available {}. ".format(
hdulist_size, free_space
)
)
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
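# Illustrative behavior: _extract_number("42", 0) gives 42,
# _extract_number("4.5", 0) gives 4 (parsed as a float, then truncated to an
# int), and _extract_number(None, 0) falls back to the default, 0.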
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy")
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation converts the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
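# Illustrative behavior (a sketch): given a = np.array([b"a  ", b"bb "],
# dtype="S3"), _rstrip_inplace(a) replaces the trailing blanks with nulls in
# place, so the array then reads as [b"a", b"bb"] without any copy being made.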
def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array)
|
4da91246409817b5a50490feff48d4dc03196d01aa6d95963058ca6ba93ba17e | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?$")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment may be an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
f"Floating point {value!r} values are not allowed in FITS headers."
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
f"ASCII characters; {comment!r} contains characters not "
"representable in ASCII or non-printable characters."
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
# Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued keyword cards by "
"setting the field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword need also be updated
keyword = self._keyword.split(".", 1)[0]
self._keyword = f"{keyword}.{field_specifier}"
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
intended as a RVKC), and the second as the card value OR the first value
can be the base keyword, and the second value the 'field-specifier:
value' string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = f"{keyword_upper}.{field_specifier}"
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
f"Unparsable card ({self.keyword}), fix it first with .verify('fix')."
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip()
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = f"{keyword.upper()}.{field_specifier}"
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
keyword = self.keyword.split(".", 1)[0]
return "{:{len}}".format(keyword, len=KEYWORD_LENGTH)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = f"{keyword}{delimiter}{value}{comment}"
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
# guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
f"The header keyword {self.keyword!r} with its value is too long"
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
will render the card as multiple consecutive commentary cards of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
{
"err_text": (
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
"fix_text": fix_text,
"fix": self._fix_value,
}
)
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
{
"err_text": f"Card keyword {keyword!r} is not upper case.",
"fix_text": fix_text,
"fix": self._fix_keyword,
}
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
{"err_text": f"Illegal keyword name {keyword!r}", "fixable": False}
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
{
"err_text": (
f"Unprintable string {valuecomment!r}; commentary "
"cards may only contain printable ASCII characters"
),
"fixable": False,
}
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
{
"err_text": (
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
"fix_text": fix_text,
"fix": self._fix_value,
}
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
{
"err_text": (
f"Unprintable string {comment!r}; header comments "
"may only contain printable ASCII characters"
),
"fixable": False,
}
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
def _int_or_float(s):
"""
Convert a string to an int if possible, otherwise to a float.
If the string cannot be converted to either an int or a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
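# Illustrative sketch (not part of the original module): the conversion order
# described in the docstring above -- int first, then float -- behaves like:
#
#     _int_or_float("42")     # -> 42 (int)
#     _int_or_float("3.5")    # -> 3.5 (float, since int("3.5") fails)
#     _int_or_float("abc")    # raises ValueError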
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
# a string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it is at most 20 characters."""
value_str = str(value).replace("e", "E")
# Limit the value string to at most 20 characters.
if (str_len := len(value_str)) > 20:
idx = value_str.find("E")
if idx < 0:
# No scientific notation, truncate decimal places
value_str = value_str[:20]
else:
# Scientific notation, truncate significand (mantissa)
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
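# Illustrative sketch (not part of the original module): _format_float keeps the
# value string within the 20-character FITS value field, truncating the
# significand when scientific notation is used:
#
#     _format_float(3.14)      # -> '3.14'
#     _format_float(1e-100)    # -> '1E-100' (lower-case 'e' replaced by 'E')
#     # For any float x, len(_format_float(x)) <= 20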
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
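# Illustrative sketch (not part of the original module): _pad always returns a
# string whose length is a multiple of Card.length (80):
#
#     len(_pad("SIMPLE  =                    T"))   # -> 80
#     len(_pad("x" * 85))                           # -> 160 (next multiple of 80)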
|
0cdae15e2194386be868da9010f5e34c3d4b33bc09b3b284a5a7a6a12986dc45 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
cds.py:
Classes to read CDS / Vizier table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import fnmatch
import itertools
import os
import re
from contextlib import suppress
from astropy.units import Unit
from . import core, fixedwidth
__doctest_skip__ = ["*"]
class CdsHeader(core.BaseHeader):
_subfmt = "CDS"
col_type_map = {
"e": core.FloatType,
"f": core.FloatType,
"i": core.IntType,
"a": core.StrType,
}
"The ReadMe file to construct header from."
readme = None
def get_type_map_key(self, col):
match = re.match(r"\d*(\S)", col.raw_type.lower())
if not match:
raise ValueError(
f'Unrecognized {self._subfmt} format "{col.raw_type}" for column'
f'"{col.name}"'
)
return match.group(1)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a CDS/MRT
header.
Parameters
----------
lines : list
List of table lines
"""
# Read header block for the table ``self.data.table_name`` from the read
# me file ``self.readme``.
if self.readme and self.data.table_name:
in_header = False
readme_inputter = core.BaseInputter()
f = readme_inputter.get_lines(self.readme)
# Header info is not in data lines but in a separate file.
lines = []
comment_lines = 0
for line in f:
line = line.strip()
if in_header:
lines.append(line)
if line.startswith(("------", "=======")):
comment_lines += 1
if comment_lines == 3:
break
else:
match = re.match(
r"Byte-by-byte Description of file: (?P<name>.+)$",
line,
re.IGNORECASE,
)
if match:
# Split 'name' in case it contains multiple files
names = [s for s in re.split("[, ]+", match.group("name")) if s]
# Iterate on names to find if one matches the tablename
# including wildcards.
for pattern in names:
if fnmatch.fnmatch(self.data.table_name, pattern):
in_header = True
lines.append(line)
break
else:
raise core.InconsistentTableError(
f"Can't find table {self.data.table_name} in {self.readme}"
)
found_line = False
for i_col_def, line in enumerate(lines):
if re.match(r"Byte-by-byte Description", line, re.IGNORECASE):
found_line = True
elif found_line: # First line after list of file descriptions
i_col_def -= 1 # Set i_col_def to last description line
break
else:
raise ValueError('no line with "Byte-by-byte Description" found')
re_col_def = re.compile(
r"""\s*
(?P<start> \d+ \s* -)? \s*
(?P<end> \d+) \s+
(?P<format> [\w.]+) \s+
(?P<units> \S+) \s+
(?P<name> \S+)
(\s+ (?P<descr> \S.*))?""",
re.VERBOSE,
)
cols = []
for line in itertools.islice(lines, i_col_def + 4, None):
if line.startswith(("------", "=======")):
break
match = re_col_def.match(line)
if match:
col = core.Column(name=match.group("name"))
col.start = int(
re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 # fmt: skip
col.end = int(match.group("end"))
unit = match.group("units")
if unit == "---":
col.unit = None # "---" is the marker for no unit in CDS/MRT table
else:
col.unit = Unit(unit, format="cds", parse_strict="warn")
col.description = (match.group("descr") or "").strip()
col.raw_type = match.group("format")
col.type = self.get_col_type(col)
match = re.match(
# Matches limits specifier (eg []) that may or may not be
# present
r"(?P<limits>[\[\]] \S* [\[\]])?"
# Matches '?' directly
r"\?"
# Matches to nullval if and only if '=' is present
r"((?P<equal>=)(?P<nullval> \S*))?"
# Matches to order specifier: ('+', '-', '+=', '-=')
r"(?P<order>[-+]?[=]?)"
# Matches description text even if no whitespace is
# present after '?'
r"(\s* (?P<descriptiontext> \S.*))?",
col.description,
re.VERBOSE,
)
if match:
col.description = (match.group("descriptiontext") or "").strip()
if issubclass(col.type, core.FloatType):
fillval = "nan"
else:
fillval = "0"
if match.group("nullval") == "-":
col.null = "---"
# CDS/MRT tables can use -, --, ---, or ---- to mark missing values
# see https://github.com/astropy/astropy/issues/1335
for i in [1, 2, 3, 4]:
self.data.fill_values.append(("-" * i, fillval, col.name))
else:
col.null = match.group("nullval")
if col.null is None:
col.null = ""
self.data.fill_values.append((col.null, fillval, col.name))
cols.append(col)
else: # could be a continuation of the previous col's description
if cols:
cols[-1].description += line.strip()
else:
raise ValueError(f'Line "{line}" not parsable as CDS header')
self.names = [x.name for x in cols]
self.cols = cols
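# Illustrative sketch (not part of the original module): a single line from a
# "Byte-by-byte Description" section, e.g.
#
#     "  1-  5  I5    ---     ID     Identification number"
#
# is matched by ``re_col_def`` above into start/end byte positions (converted to
# 0-based ``col.start`` = 0 and ``col.end`` = 5), raw format "I5", no unit
# ("---" maps to ``col.unit = None``), name "ID" and the trailing description.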
class CdsData(core.BaseData):
"""CDS table data reader."""
_subfmt = "CDS"
splitter_class = fixedwidth.FixedWidthSplitter
def process_lines(self, lines):
"""Skip over CDS/MRT header by finding the last section delimiter."""
# If the header has a ReadMe and data has a filename
# then no need to skip, as the data lines do not have header
# info. The ``read`` method adds the table_name to the ``data``
# attribute.
if self.header.readme and self.table_name:
return lines
i_sections = [
i for i, x in enumerate(lines) if x.startswith(("------", "======="))
]
if not i_sections:
raise core.InconsistentTableError(
f"No {self._subfmt} section delimiter found"
)
return lines[i_sections[-1] + 1 :]
class Cds(core.BaseReader):
"""CDS format table.
See: https://vizier.unistra.fr/doc/catstd.htx
Example::
Table: Table name here
= ==============================================================================
Catalog reference paper
Bibliography info here
================================================================================
ADC_Keywords: Keyword ; Another keyword ; etc
Description:
Catalog description here.
================================================================================
Byte-by-byte Description of file: datafile3.txt
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 3 I3 --- Index Running identification number
5- 6 I2 h RAh Hour of Right Ascension (J2000)
8- 9 I2 min RAm Minute of Right Ascension (J2000)
11- 15 F5.2 s RAs Second of Right Ascension (J2000)
--------------------------------------------------------------------------------
Note (1): A CDS file can contain sections with various metadata.
Notes can be multiple lines.
Note (2): Another note.
--------------------------------------------------------------------------------
1 03 28 39.09
2 04 18 24.11
**About parsing the CDS format**
The CDS format consists of a table description and the table data. These
can be in separate files as a ``ReadMe`` file plus data file(s), or
combined in a single file. Different subsections within the description
are separated by lines of dashes or equal signs ("------" or "======").
The table which specifies the column information must be preceded by a line
starting with "Byte-by-byte Description of file:".
In the case where the table description is combined with the data values,
the data must be in the last section and must be preceded by a section
delimiter line (dashes or equal signs only).
**Basic usage**
Use the ``ascii.read()`` function as normal, with an optional ``readme``
parameter indicating the CDS ReadMe file. If not supplied it is assumed that
the header information is at the top of the given table. Examples::
>>> from astropy.io import ascii
>>> table = ascii.read("data/cds.dat")
>>> table = ascii.read("data/vizier/table1.dat", readme="data/vizier/ReadMe")
>>> table = ascii.read("data/cds/multi/lhs2065.dat", readme="data/cds/multi/ReadMe")
>>> table = ascii.read("data/cds/glob/lmxbrefs.dat", readme="data/cds/glob/ReadMe")
The table name and the CDS ReadMe file can be entered as URLs. This can be used
to directly load tables from the Internet. For example, Vizier tables from the
CDS::
>>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat",
... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe")
If the header (ReadMe) and data are stored in a single file and there
is content between the header and the data (for instance Notes), then the
parsing process may fail. In this case you can instruct the reader to
guess the actual start of the data by supplying ``data_start='guess'`` in the
call to the ``ascii.read()`` function. You should verify that the output
data table matches expectation based on the input CDS file.
**Using a reader object**
When ``Cds`` reader object is created with a ``readme`` parameter
passed to it at initialization, then when the ``read`` method is
executed with a table filename, the header information for the
specified table is taken from the ``readme`` file. An
``InconsistentTableError`` is raised if the ``readme`` file does not
have header information for the given table.
>>> readme = "data/vizier/ReadMe"
>>> r = ascii.get_reader(ascii.Cds, readme=readme)
>>> table = r.read("data/vizier/table1.dat")
>>> # table5.dat has the same ReadMe file
>>> table = r.read("data/vizier/table5.dat")
If no ``readme`` parameter is specified, then the header
information is assumed to be at the top of the given table.
>>> r = ascii.get_reader(ascii.Cds)
>>> table = r.read("data/cds.dat")
>>> #The following gives InconsistentTableError, since no
>>> #readme file was given and table1.dat does not have a header.
>>> table = r.read("data/vizier/table1.dat")
Traceback (most recent call last):
...
InconsistentTableError: No CDS section delimiter found
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "cds"
_io_registry_format_aliases = ["cds"]
_io_registry_can_write = False
_description = "CDS format table"
data_class = CdsData
header_class = CdsHeader
def __init__(self, readme=None):
super().__init__()
self.header.readme = readme
def write(self, table=None):
"""Not available for the CDS class (raises NotImplementedError)."""
raise NotImplementedError
def read(self, table):
# If the read kwarg `data_start` is 'guess' then the table may have extraneous
# lines between the end of the header and the beginning of data.
if self.data.start_line == "guess":
# Replicate the first part of BaseReader.read up to the point where
# the table lines are initially read in.
with suppress(TypeError):
# For strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
self.data.header = self.header
self.header.data = self.data
# Get a list of the lines (rows) in the table
lines = self.inputter.get_lines(table)
# Now try increasing data.start_line by one until the table reads successfully.
# For efficiency use the in-memory list of lines instead of `table`, which
# could be a file.
for data_start in range(len(lines)):
self.data.start_line = data_start
with suppress(Exception):
table = super().read(lines)
return table
else:
return super().read(table)
|
24193c481603d095190185f8af57ad3bd9178d24f311d12cddd074f52e0c7e88 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import fnmatch
import functools
import inspect
import itertools
import operator
import os
import re
import warnings
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
def _check_multidim_table(table, max_ndim):
"""Check that ``table`` has only columns with ndim <= ``max_ndim``.
Currently ECSV is the only built-in format that supports output of arbitrary
N-d columns, but HTML supports 2-d.
"""
# No limit?
if max_ndim is None:
return
# Check for N-d columns
nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim]
if nd_names:
raise ValueError(
f"column(s) with dimension > {max_ndim} "
"cannot be be written with this format, try using 'ecsv' "
"(Enhanced CSV) format"
)
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = "2b=48Av%0-V3p>bX"
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (
dialect.delimiter == " "
)
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == "":
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
# If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked.
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
"""All instances of this class shall have the same hash."""
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **fill_values** : dict of fill values
* **shape** : list of element shape (default [] => scalar)
* **data** : list of converted column values
* **subtype** : actual datatype for columns serialized with JSON
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
self.shape = []
self.subtype = None
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table, newline=None):
"""Get the lines from the ``table`` input.
The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file-like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a
``read()`` method, or a list of strings.
newline : str, optional
Line separator. If `None` use OS default from ``splitlines()``.
Returns
-------
lines : list
List of lines
"""
try:
if hasattr(table, "read") or (
"\n" not in table + "" and "\r" not in table + ""
):
with get_readable_fileobj(table, encoding=self.encoding) as fileobj:
table = fileobj.read()
if newline is None:
lines = table.splitlines()
else:
lines = table.split(newline)
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
if len(table) > 1:
lines = table
else:
# treat single entry as if string had been passed directly
if newline is None:
lines = table[0].splitlines()
else:
lines = table[0].split(newline)
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable'
)
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines.
"""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
"""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = " "
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first.
"""
if self.delimiter == r"\s":
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip() + "\n"
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip(" \t")
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Yields
------
line : list of str
Each line's split values.
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = " " if self.delimiter == r"\s" else self.delimiter
csv_reader = csv.reader(
lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace,
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = " " if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
)
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals).rstrip("\r\n")
return out
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings.
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = "NONE"
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if callable(line_or_func):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
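# Illustrative sketch (not part of the original module): how the accepted forms
# of ``line_or_func`` are resolved:
#
#     _get_line_index(2, ["a", "b", "c"])                    # -> 2
#     _get_line_index(-1, ["a", "b", "c"])                   # -> 2 (from the end)
#     _get_line_index(lambda lines: len(lines) - 1, ["a"])   # -> 0
#     _get_line_index(None, ["a", "b", "c"])                 # -> None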
class BaseHeader:
"""
Base table header reader.
"""
auto_format = "col{}"
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [
re.sub("^" + self.comment, "", x).strip() for x in comment_lines
]
if comment_lines:
meta.setdefault("table", {})["comments"] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError("No header line found in table")
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines."""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get("comments", []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(
range(self.start_line), itertools.cycle(self.write_spacer_lines)
):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table."""
return tuple(
col.name if isinstance(col, Column) else col.info.name for col in self.cols
)
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name, new_name):
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f"got column type {type(col)} instead of required {Column}")
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError(
f'Unknown data type ""{col.raw_type}"" for column "{col.name}"'
)
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (
_is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise InconsistentTableError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if (
guessing
and len(self.colnames) <= 1
and self.__class__.__name__ != "EcsvHeader"
):
raise ValueError(
"Table format guessing requires at least two columns, got {}".format(
list(self.colnames)
)
)
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
"Length of names argument ({}) does not match number"
" of table columns ({})".format(len(names), len(self.colnames))
)
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, "")]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
READ: Strip out comment lines and blank lines from list of ``lines``.
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return list(nonblank_lines)
def get_data_lines(self, lines):
"""
READ: Set ``data_lines`` attribute to lines slice comprising table data values.
"""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line.
"""
return self.splitter(self.data_lines)
def masks(self, cols):
"""READ: Set fill value for each column and then apply that fill value.
In the first step it is determined which value from ``fill_values`` applies to
which column, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""READ, WRITE: Set fill values of individual cols based on fill_values of BaseData.
``fill_values`` has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, "fill_values"):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ""
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError(
"Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)"
)
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in (
(i, x)
for i, x in enumerate(self.header.colnames)
if x in affect_cols
):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""READ: Replace string values in col.str_vals and set masks."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=bool)
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""WRITE: replace string values in col.str_vals."""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in (
(i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values
):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, "mask"):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings.
This sets the fill values and possibly column formats from the input
formats={} keyword, then ends up calling table.pprint._pformat_col_iter()
by a circuitous path. That function does the real work of formatting.
Finally replace anything matching the fill_values.
Returns
-------
values : list of list of str
"""
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
"""Write ``self.cols`` in place to ``lines``.
Parameters
----------
lines : list
List for collecting output of writing self.cols.
"""
if callable(self.start_line):
raise TypeError("Start_line attribute cannot be callable for write()")
else:
data_start_line = self.start_line or 0
while len(lines) < data_start_line:
lines.append(itertools.cycle(self.write_spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""WRITE: set column formats."""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
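# Illustrative sketch (not part of the original module): the ``fill_values``
# specification documented in ``BaseData._set_fill_values`` above, e.g. to
# replace "-999" with "0" in a (hypothetical) column "flux" and empty fields
# with "nan" in all columns:
#
#     data = BaseData()
#     data.fill_values = [("-999", "0", "flux"), ("", "nan")]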
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_
(e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python
type covered by a numpy type (e.g., int, float, str, bool).
Returns
-------
converter : callable
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
converter_type : type
``converter_type`` tracks the generic data type produced by the
converter function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if "int" in type_name:
converter_type = IntType
elif "float" in type_name:
converter_type = FloatType
elif "bool" in type_name:
converter_type = BoolType
elif "str" in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all(
(svals == "False") | (svals == "True") | (svals == "0") | (svals == "1")
):
raise ValueError('bool input strings must be False, True, 0, 1, or ""')
vals = numpy.asarray(vals)
trues = (vals == "True") | (vals == "1")
falses = (vals == "False") | (vals == "0")
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False, True, 0, 1, or ""')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
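# Illustrative sketch (not part of the original module): building and applying
# converters returned by ``convert_numpy``:
#
#     converter, converter_type = convert_numpy(int)
#     converter(["1", "2", "3"])        # -> numpy.array([1, 2, 3])
#     converter_type is IntType         # -> True
#
#     bool_conv, _ = convert_numpy(bool)
#     bool_conv(["True", "0", "1"])     # -> numpy.array([True, False, True])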
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
# User-defined converters which gets set in ascii.ui if a `converter` kwarg
# is supplied.
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type).
"""
# Allow specifying a single converter instead of a list of converters.
# The input `converters` must be a ``type`` value that can init np.dtype.
try:
# Don't allow list-like things that dtype accepts
assert type(converters) is type
converters = [numpy.dtype(converters)]
except (AssertionError, TypeError):
pass
converters_out = []
try:
for converter in converters:
try:
converter_func, converter_type = converter
except TypeError as err:
if str(err).startswith("cannot unpack"):
converter_func, converter_type = convert_numpy(converter)
else:
raise
if not issubclass(converter_type, NoType):
raise ValueError("converter_type must be a subclass of NoType")
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError) as err:
raise ValueError(
"Error: invalid format for converters, see "
f"documentation\n{converters}: {err}"
)
return converters_out
def _convert_vals(self, cols):
for col in cols:
for key, converters in self.converters.items():
if fnmatch.fnmatch(col.name, key):
break
else:
if col.dtype is not None:
converters = [convert_numpy(col.dtype)]
else:
converters = self.default_converters
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
# value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = "no converters defined"
while not hasattr(col, "data"):
# Try converters, popping the unsuccessful ones from the list.
# If there are no converters left here then fail.
if not col.converters:
raise ValueError(f"Column {col.name} failed to convert: {last_err}")
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError("converter type does not match column type")
try:
col.data = converter_func(col.str_vals)
col.type = converter_type
except (OverflowError, TypeError, ValueError) as err:
# Overflow during conversion (most likely an int that
# doesn't fit in native C long). Put string at the top of
# the converters list for the next while iteration.
# With python/cpython#95778 this has been supplemented with a
# "ValueError: Exceeds the limit (4300) for integer string conversion"
# so need to catch that as well.
if isinstance(err, OverflowError) or (
isinstance(err, ValueError)
and str(err).startswith("Exceeds the limit")
):
warnings.warn(
f"OverflowError converting to {converter_type.__name__} in"
f" column {col.name}, reverting to String.",
AstropyWarning,
)
col.converters.insert(0, convert_numpy(str))
else:
col.converters.pop(0)
last_err = err
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``.
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + "_"
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
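# Illustrative sketch (not part of the original module): duplicate names get an
# increasing ``_<N>`` suffix until they are unique:
#
#     _deduplicate_names(["a", "b", "a", "a"])   # -> ['a', 'b', 'a_1', 'a_2']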
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(int), convert_numpy(float), convert_numpy(str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [
numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, "mask") and numpy.any(x.mask)
else x.data
for x in cols
]
out = Table(t_cols, names=[x.name for x in cols], meta=meta["table"])
for col, out_col in zip(cols, out.columns.values()):
for attr in ("format", "unit", "description"):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, "meta"):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get("_format_name")
if format is None:
return
fast = dct.get("_fast")
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ["ascii." + format] + dct.get("_io_registry_format_aliases", [])
if dct.get("_io_registry_suffix"):
func = functools.partial(connect.io_identify, dct["_io_registry_suffix"])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(READ_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get("_io_registry_can_write", True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (
inspect.cleandoc(WRITE_DOCSTRING).strip()
+ "\n\n"
+ header
+ re.sub(".", "=", header)
+ "\n"
)
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
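# Illustrative sketch (not part of the original module):
#
#     _is_number("1e3")    # -> True
#     _is_number("abc")    # -> False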
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table or BaseHeader.
For the latter this relies on BaseHeader implementing ``colnames``,
``rename_column``, and ``remove_columns``.
Parameters
----------
table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader`
Input table or BaseHeader subclass instance
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
def rename_columns(table, names):
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = "x" * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
if names is not None:
rename_columns(table, names)
else:
colnames_uniq = _deduplicate_names(table.colnames)
if colnames_uniq != list(table.colnames):
rename_columns(table, colnames_uniq)
names_set = set(table.colnames)
if include_names is not None:
names_set.intersection_update(include_names)
if exclude_names is not None:
names_set.difference_update(exclude_names)
if names_set != set(table.colnames):
remove_names = set(table.colnames) - names_set
table.remove_columns(remove_names)
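# Illustrative sketch (not part of the original module): applying include/exclude
# filtering to a small table (column names here are hypothetical):
#
#     t = Table({"a": [1], "b": [2], "c": [3]})
#     _apply_include_exclude_names(t, names=None,
#                                  include_names=["a", "b"], exclude_names=["b"])
#     t.colnames   # -> ['a']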
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
# Max column dimension that writer supports for this format. Exceptions
# include ECSV (no limit) and HTML (max_ndim=2).
max_ndim = 1
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in header.
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(), cols=OrderedDict())
def _check_multidim_table(self, table):
"""Check that the dimensions of columns in ``table`` are acceptable.
The reader class attribute ``max_ndim`` defines the maximum dimension of
columns that can be written using this format. The base value is ``1``,
corresponding to normal scalar columns with just a length.
Parameters
----------
table : `~astropy.table.Table`
Input table.
Raises
------
ValueError
If any column exceeds the number of allowed dimensions
"""
_check_multidim_table(table, self.max_ndim)
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file-like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + "":
self.data.table_name = os.path.basename(table)
# If one of the newline chars is set as field delimiter, only
# accept the other one as line splitter
if self.header.splitter.delimiter == "\n":
newline = "\r"
elif self.header.splitter.delimiter == "\r":
newline = "\n"
else:
newline = None
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table, newline=newline)
# Set self.data.data_lines to a slice of lines contain the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = (
"Number of header columns ({}) inconsistent with"
" data columns ({}) at data line {}\n"
"Header values: {}\n"
"Data values: {}".format(
n_cols, len(str_vals), i, [x.name for x in cols], str_vals
)
)
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, "table_meta"):
self.meta["table"].update(self.header.table_meta)
_apply_include_exclude_names(
self.header, self.names, self.include_names, self.exclude_names
)
table = self.outputter(self.header.cols, self.meta)
self.cols = self.header.cols
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp."""
if not hasattr(self, "lines"):
raise ValueError(
"Table must be read prior to accessing the header comment lines"
)
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Check that table column dimensions are supported by this format class.
# Most formats support only 1-d columns, but some like ECSV support N-d.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined with the subsequent line.
Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = "\\"
replace_char = " "
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append("".join(parts))
parts = []
return outlines
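# Worked example (illustrative sketch, default settings): the continuation
# character is replaced by ``replace_char`` and the following line is appended.
#
#     ContinuationLinesInputter().process_lines(['1 \\', '2 3', '4 5'])
#     # -> ['1  2 3', '4 5']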
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings."""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (
self.escapechar is None or lastchar != self.escapechar
):
in_quote = not in_quote
if char == "\t" and not in_quote:
char = " "
lastchar = char
newline.append(char)
return "".join(newline)
extra_reader_pars = (
"Reader",
"Inputter",
"Outputter",
"delimiter",
"comment",
"quotechar",
"header_start",
"data_start",
"data_end",
"converters",
"encoding",
"data_Splitter",
"header_Splitter",
"names",
"include_names",
"exclude_names",
"strict_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs["Inputter"] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if "fast_reader" in kwargs:
if kwargs["fast_reader"]["enable"] == "force":
raise ParameterError(
"fast_reader required with "
"{}, but this is not a fast C reader: {}".format(
kwargs["fast_reader"], Reader
)
)
else:
del kwargs["fast_reader"] # Otherwise ignore fast_reader parameter
reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars}
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested setting data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
# csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line,
# therefore DefaultSplitter cannot handle these as delimiters.
if "delimiter" in kwargs:
if kwargs["delimiter"] in ("\n", "\r", "\r\n"):
reader.header.splitter = BaseSplitter()
reader.data.splitter = BaseSplitter()
reader.header.splitter.delimiter = kwargs["delimiter"]
reader.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
reader.header.comment = kwargs["comment"]
reader.data.comment = kwargs["comment"]
if "quotechar" in kwargs:
reader.header.splitter.quotechar = kwargs["quotechar"]
reader.data.splitter.quotechar = kwargs["quotechar"]
if "data_start" in kwargs:
reader.data.start_line = kwargs["data_start"]
if "data_end" in kwargs:
reader.data.end_line = kwargs["data_end"]
if "header_start" in kwargs:
if reader.header.start_line is not None:
reader.header.start_line = kwargs["header_start"]
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as an absolute number and not relative to header_start.
# So, ignore this Reader here.
if (
("data_start" not in kwargs)
and (default_header_length is not None)
and reader._format_name
not in ["fixed_width_two_line", "commented_header"]
):
reader.data.start_line = (
reader.header.start_line + default_header_length
)
elif kwargs["header_start"] is not None:
# User trying to set a None header start to some value other than None
raise ValueError("header_start cannot be modified for this Reader")
if "converters" in kwargs:
reader.outputter.converters = kwargs["converters"]
if "data_Splitter" in kwargs:
reader.data.splitter = kwargs["data_Splitter"]()
if "header_Splitter" in kwargs:
reader.header.splitter = kwargs["header_Splitter"]()
if "names" in kwargs:
reader.names = kwargs["names"]
if None in reader.names:
raise TypeError("Cannot have None for column name")
if len(set(reader.names)) != len(reader.names):
raise ValueError("Duplicate column names")
if "include_names" in kwargs:
reader.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
reader.exclude_names = kwargs["exclude_names"]
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if "strict_names" in kwargs:
reader.strict_names = kwargs["strict_names"]
if "fill_values" in kwargs:
reader.data.fill_values = kwargs["fill_values"]
if "fill_include_names" in kwargs:
reader.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
reader.data.fill_exclude_names = kwargs["fill_exclude_names"]
if "encoding" in kwargs:
reader.encoding = kwargs["encoding"]
reader.inputter.encoding = kwargs["encoding"]
return reader
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"strip_whitespace",
"names",
"include_names",
"exclude_names",
"fill_values",
"fill_include_names",
"fill_exclude_names",
)
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module.
"""
from .fastbasic import FastBasic
# A value of None for fill_values implies getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if "fill_values" in kwargs and kwargs["fill_values"] is None:
del kwargs["fill_values"]
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f"fast_{Writer._format_name}" in FAST_CLASSES:
# Switch to fast writer
kwargs["fast_writer"] = fast_writer
return FAST_CLASSES[f"fast_{Writer._format_name}"](**kwargs)
writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars}
writer = Writer(**writer_kwargs)
if "delimiter" in kwargs:
writer.header.splitter.delimiter = kwargs["delimiter"]
writer.data.splitter.delimiter = kwargs["delimiter"]
if "comment" in kwargs:
writer.header.write_comment = kwargs["comment"]
writer.data.write_comment = kwargs["comment"]
if "quotechar" in kwargs:
writer.header.splitter.quotechar = kwargs["quotechar"]
writer.data.splitter.quotechar = kwargs["quotechar"]
if "formats" in kwargs:
writer.data.formats = kwargs["formats"]
if "strip_whitespace" in kwargs:
if kwargs["strip_whitespace"]:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller("strip", " \t")
else:
writer.data.splitter.process_val = None
if "names" in kwargs:
writer.header.names = kwargs["names"]
if "include_names" in kwargs:
writer.include_names = kwargs["include_names"]
if "exclude_names" in kwargs:
writer.exclude_names = kwargs["exclude_names"]
if "fill_values" in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs["fill_values"][1] + ""
kwargs["fill_values"] = [kwargs["fill_values"]]
writer.data.fill_values = kwargs["fill_values"] + writer.data.fill_values
if "fill_include_names" in kwargs:
writer.data.fill_include_names = kwargs["fill_include_names"]
if "fill_exclude_names" in kwargs:
writer.data.fill_exclude_names = kwargs["fill_exclude_names"]
return writer
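# Worked example (illustrative sketch) of the ``fill_values`` handling above: a
# bare (match, replace) tuple is wrapped in a list before being prepended to the
# writer's defaults, while a list of such tuples is passed through unchanged.
#
#     kwargs = {"fill_values": ("", "N/A")}              # single tuple
#     kwargs["fill_values"][1] + ""                      # succeeds, so it is wrapped:
#     kwargs["fill_values"] = [kwargs["fill_values"]]    # -> [("", "N/A")]
#
# A list like [("", "N/A"), ("-999", "0")] fails the concatenation test
# (TypeError) or the indexing test (IndexError) and is therefore left as-is.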
|
7255c76760d08f45dd66e2a81ddff7418b715bea9949095b82c3abc4e55f9f22 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from . import basic, core
from .core import DefaultSplitter, InconsistentTableError
class FixedWidthSplitter(core.BaseSplitter):
"""
Split line based on fixed start and end positions for each ``col`` in
``self.cols``.
This class requires that the Header class will have defined ``col.start``
and ``col.end`` for each column. The reference to the ``header.cols`` gets
put in the splitter object by the base Reader.read() function just in time
for splitting data lines by a ``data`` object.
Note that the ``start`` and ``end`` positions are defined in the pythonic
style so line[start:end] is the desired substring for a column. This splitter
class does not have a hook for ``process_lines`` since that is generally not
useful for fixed-width input.
"""
delimiter_pad = ""
bookend = False
delimiter = "|"
def __call__(self, lines):
for line in lines:
vals = [line[x.start : x.end] for x in self.cols]
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
if self.bookend:
bookend_left = delimiter + pad
bookend_right = pad + delimiter
else:
bookend_left = ""
bookend_right = ""
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
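# Worked example (illustrative sketch, default delimiter '|', no padding or
# bookends): values are right-justified to the column widths and joined.
#
#     FixedWidthSplitter().join(['1.2', 'hello'], [5, 11])
#     # -> '  1.2|      hello'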
class FixedWidthHeaderSplitter(DefaultSplitter):
"""Splitter class that splits on ``|``."""
delimiter = "|"
class FixedWidthHeader(basic.BasicHeader):
"""
Fixed width table header reader.
"""
splitter_class = FixedWidthHeaderSplitter
""" Splitter class for splitting data lines into columns """
position_line = None # secondary header line position
""" row index of line that specifies position (default = 1) """
set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
def get_line(self, lines, index):
for i, line in enumerate(self.process_lines(lines)):
if i == index:
break
else: # No header line matching
raise InconsistentTableError("No header line found in table")
return line
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
header_rows = getattr(self, "header_rows", ["name"])
# See "else" clause below for explanation of start_line and position_line
start_line = core._get_line_index(self.start_line, self.process_lines(lines))
position_line = core._get_line_index(
self.position_line, self.process_lines(lines)
)
# If start_line is none then there is no header line. Column positions are
# determined from first data line and column names are either supplied by user
# or auto-generated.
if start_line is None:
if position_line is not None:
raise ValueError(
"Cannot set position_line without also setting header_start"
)
# data.data_lines attribute already set via self.data.get_data_lines(lines)
# in BaseReader.read(). This includes slicing for data_start / data_end.
data_lines = self.data.data_lines
if not data_lines:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
self.names = [self.auto_format.format(i) for i in range(1, len(vals) + 1)]
else:
# This bit of code handles two cases:
# start_line = <index> and position_line = None
# Single header line where that line is used to determine both the
# column positions and names.
# start_line = <index> and position_line = <index2>
# Two header lines where the first line defines the column names and
# the second line defines the column positions
if position_line is not None:
# Define self.col_starts and self.col_ends so that the call to
# get_fixedwidth_params below will use those to find the header
# column names. Note that get_fixedwidth_params returns Python
# slice col_ends but expects inclusive col_ends on input (for
# more intuitive user interface).
line = self.get_line(lines, position_line)
if len(set(line) - {self.splitter.delimiter, " "}) != 1:
raise InconsistentTableError(
"Position line should only contain delimiters and "
'one other character, e.g. "--- ------- ---".'
)
# The line above lies. It accepts white space as well.
# We don't want to encourage using three different
# characters, because that can cause ambiguities, but white
# spaces are so common everywhere that practicality beats
# purity here.
charset = self.set_of_position_line_characters.union(
{self.splitter.delimiter, " "}
)
if not set(line).issubset(charset):
raise InconsistentTableError(
f"Characters in position line must be part of {charset}"
)
vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
self.col_ends = [x - 1 if x is not None else None for x in col_ends]
# Get the column names from the header line
line = self.get_line(lines, start_line + header_rows.index("name"))
self.names, starts, ends = self.get_fixedwidth_params(line)
self._set_cols_from_names()
for ii, attr in enumerate(header_rows):
if attr != "name":
line = self.get_line(lines, start_line + ii)
vals = self.get_fixedwidth_params(line)[0]
for col, val in zip(self.cols, vals):
if val:
setattr(col, attr, val)
# Set column start and end positions.
for i, col in enumerate(self.cols):
col.start = starts[i]
col.end = ends[i]
def get_fixedwidth_params(self, line):
"""
Split ``line`` on the delimiter and determine column values and
column start and end positions. This might include null columns with
zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or
``header2_row = "----- ------- -----"``). The null columns are
stripped out. Returns the values between delimiters and the
corresponding start and end positions.
Parameters
----------
line : str
Input line
Returns
-------
vals : list
List of values.
starts : list
List of starting indices.
ends : list
List of ending indices.
"""
# If column positions are already specified then just use those.
# If neither column starts or ends are given, figure out positions
# between delimiters. Otherwise, either the starts or the ends have
# been given, so figure out whichever wasn't given.
if self.col_starts is not None and self.col_ends is not None:
starts = list(self.col_starts) # could be any iterable, e.g. np.array
# user supplies inclusive endpoint
ends = [x + 1 if x is not None else None for x in self.col_ends]
if len(starts) != len(ends):
raise ValueError(
"Fixed width col_starts and col_ends must have the same length"
)
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
elif self.col_starts is None and self.col_ends is None:
# There might be a cleaner way to do this but it works...
vals = line.split(self.splitter.delimiter)
starts = [0]
ends = []
for val in vals:
if val:
ends.append(starts[-1] + len(val))
starts.append(ends[-1] + 1)
else:
starts[-1] += 1
starts = starts[:-1]
vals = [x.strip() for x in vals if x]
if len(vals) != len(starts) or len(vals) != len(ends):
raise InconsistentTableError("Error parsing fixed width header")
else:
# exactly one of col_starts or col_ends is given...
if self.col_starts is not None:
starts = list(self.col_starts)
ends = starts[1:] + [None] # Assume each col ends where the next starts
else: # self.col_ends is not None
ends = [x + 1 for x in self.col_ends]
starts = [0] + ends[:-1] # Assume each col starts where the last ended
vals = [line[start:end].strip() for start, end in zip(starts, ends)]
return vals, starts, ends
def write(self, lines):
# Header line not written until data are formatted. Until then it is
# not known how wide each column will be for fixed width.
pass
class FixedWidthData(basic.BasicData):
"""
Base table data reader.
"""
splitter_class = FixedWidthSplitter
""" Splitter class for splitting data lines into columns """
start_line = None
def write(self, lines):
default_header_rows = [] if self.header.start_line is None else ["name"]
header_rows = getattr(self, "header_rows", default_header_rows)
# First part is getting the widths of each column.
# List (rows) of list (column values) for data lines
vals_list = []
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
# List (rows) of list (columns values) for header lines.
hdrs_list = []
for col_attr in header_rows:
vals = [
"" if (val := getattr(col.info, col_attr)) is None else str(val)
for col in self.cols
]
hdrs_list.append(vals)
# Widths for data columns
widths = [
max(len(vals[i_col]) for vals in vals_list)
for i_col in range(len(self.cols))
]
# Incorporate widths for header columns (if there are any)
if hdrs_list:
for i_col in range(len(self.cols)):
widths[i_col] = max(
widths[i_col], *(len(vals[i_col]) for vals in hdrs_list)
)
# Now collect formatted header and data lines into the output lines
for vals in hdrs_list:
lines.append(self.splitter.join(vals, widths))
if self.header.position_line is not None:
vals = [self.header.position_char * width for width in widths]
lines.append(self.splitter.join(vals, widths))
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class FixedWidth(basic.Basic):
"""Fixed width table with single header line defining column names and positions.
Examples::
# Bar delimiter in header and data
| Col1 | Col2 | Col3 |
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Bar delimiter in header only
Col1 | Col2 | Col3
1.2 hello there 3
2.4 many words 7
# No delimiter with column positions specified as input
Col1 Col2Col3
1.2hello there 3
2.4many words 7
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width"
_description = "Fixed width"
header_class = FixedWidthHeader
data_class = FixedWidthData
def __init__(
self,
col_starts=None,
col_ends=None,
delimiter_pad=" ",
bookend=True,
header_rows=None,
):
if header_rows is None:
header_rows = ["name"]
super().__init__()
self.data.splitter.delimiter_pad = delimiter_pad
self.data.splitter.bookend = bookend
self.header.col_starts = col_starts
self.header.col_ends = col_ends
self.header.header_rows = header_rows
self.data.header_rows = header_rows
if self.data.start_line is None:
self.data.start_line = len(header_rows)
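# Minimal usage sketch (illustrative only; the table content is made up):
#
#     from astropy.io import ascii
#     lines = ['| Col1 |  Col2 |',
#              '|  1.2 | hello |',
#              '|  2.4 | world |']
#     tbl = ascii.read(lines, format='fixed_width')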
class FixedWidthNoHeaderHeader(FixedWidthHeader):
"""Header reader for fixed with tables with no header line."""
start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
"""Data reader for fixed width tables with no header line."""
start_line = 0
class FixedWidthNoHeader(FixedWidth):
"""Fixed width table which has no header line.
When reading, column names are either input (``names`` keyword) or
auto-generated. Column positions are determined either by input
(``col_starts`` and ``col_ends`` keywords) or by splitting the first data
line. In the latter case a ``delimiter`` is required to split the data
line.
Examples::
# Bar delimiter in header and data
| 1.2 | hello there | 3 |
| 2.4 | many words | 7 |
# Compact table having no delimiter and column positions specified as input
1.2hello there3
2.4many words 7
This class is just a convenience wrapper around the ``FixedWidth`` reader
but with ``header_start=None`` and ``data_start=0``.
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_no_header"
_description = "Fixed width with no header"
header_class = FixedWidthNoHeaderHeader
data_class = FixedWidthNoHeaderData
def __init__(self, col_starts=None, col_ends=None, delimiter_pad=" ", bookend=True):
super().__init__(
col_starts,
col_ends,
delimiter_pad=delimiter_pad,
bookend=bookend,
header_rows=[],
)
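# Minimal usage sketch (illustrative only; the column positions and names are
# made up). Note that ``col_ends`` values are inclusive, as described in
# FixedWidthHeader.get_fixedwidth_params above.
#
#     from astropy.io import ascii
#     lines = ['John  555-1234  192.168.1.10',
#              'Mary  555-2134  192.168.1.12']
#     tbl = ascii.read(lines, format='fixed_width_no_header',
#                      names=('Name', 'Phone', 'TCP'),
#                      col_starts=(0, 6, 16), col_ends=(3, 13, 27))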
class FixedWidthTwoLineHeader(FixedWidthHeader):
"""Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
"""
splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
"""Splitter for fixed width tables splitting on ``' '``."""
delimiter = " "
class FixedWidthTwoLineData(FixedWidthData):
"""Data reader for fixed with tables with two header lines."""
splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
"""Fixed width table which has two header lines.
The first header line defines the column names and the second implicitly
defines the column positions.
Examples::
# Typical case with column extent defined by ---- under column names.
col1 col2 <== header_start = 0
----- ------------ <== position_line = 1, position_char = "-"
1 bee flies <== data_start = 2
2 fish swims
# Pretty-printed table
+------+------------+
| Col1 | Col2 |
+------+------------+
| 1.2 | "hello" |
| 2.4 | there world|
+------+------------+
See the :ref:`astropy:fixed_width_gallery` for specific usage examples.
"""
_format_name = "fixed_width_two_line"
_description = "Fixed width with second header line"
data_class = FixedWidthTwoLineData
header_class = FixedWidthTwoLineHeader
def __init__(
self,
position_line=None,
position_char="-",
delimiter_pad=None,
bookend=False,
header_rows=None,
):
if len(position_char) != 1:
raise ValueError(
f'Position_char="{position_char}" must be a single character'
)
super().__init__(
delimiter_pad=delimiter_pad, bookend=bookend, header_rows=header_rows
)
if position_line is None:
position_line = len(self.header.header_rows)
self.header.position_line = position_line
self.header.position_char = position_char
self.data.start_line = position_line + 1
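# Minimal usage sketch (illustrative only; the table content is made up):
#
#     from astropy.io import ascii
#     lines = ['col1 col2',
#              '---- ----',
#              '   1  bee',
#              '   2 fish']
#     tbl = ascii.read(lines, format='fixed_width_two_line')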
|
7b86fe7794107de14080886880a731b3f8db4cdee99a601f3a37976533ad120f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Classes to read AAS MRT table format.
Ref: https://journals.aas.org/mrt-standards
:Copyright: Smithsonian Astrophysical Observatory (2021)
:Author: Tom Aldcroft ([email protected]), \
Suyog Garg ([email protected])
"""
import re
import warnings
from io import StringIO
from math import ceil, floor
from string import Template
from textwrap import wrap
import numpy as np
from astropy import units as u
from astropy.table import Column, MaskedColumn, Table
from . import cds, core, fixedwidth
MAX_SIZE_README_LINE = 80
MAX_COL_INTLIMIT = 100000
__doctest_skip__ = ["*"]
BYTE_BY_BYTE_TEMPLATE = [
"Byte-by-byte Description of file: $file",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
"$bytebybyte",
"--------------------------------------------------------------------------------",
]
MRT_TEMPLATE = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"$bytebybyte",
"Notes:",
"--------------------------------------------------------------------------------",
]
class MrtSplitter(fixedwidth.FixedWidthSplitter):
"""
Contains the join function to left align the MRT columns
when writing to a file.
"""
def join(self, vals, widths):
vals = [val + " " * (width - len(val)) for val, width in zip(vals, widths)]
return self.delimiter.join(vals)
class MrtHeader(cds.CdsHeader):
_subfmt = "MRT"
def _split_float_format(self, value):
"""
Splits a Float string into different parts to find number
of digits after decimal and check if the value is in Scientific
notation.
Parameters
----------
value : str
String containing the float value to split.
Returns
-------
fmt: (int, int, int, bool, bool)
List of values describing the Float string.
(size, ent, dec, sign, exp)
size, length of the given string.
ent, number of digits before decimal point.
dec, number of digits after decimal point.
sign, whether or not the given value is signed.
exp, is value in Scientific notation?
"""
regfloat = re.compile(
r"""(?P<sign> [+-]*)
(?P<ent> [^eE.]+)
(?P<deciPt> [.]*)
(?P<decimals> [0-9]*)
(?P<exp> [eE]*-*)[0-9]*""",
re.VERBOSE,
)
mo = regfloat.match(value)
if mo is None:
raise Exception(f"{value} is not a float number")
return (
len(value),
len(mo.group("ent")),
len(mo.group("decimals")),
mo.group("sign") != "",
mo.group("exp") != "",
)
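# Worked examples (illustrative sketch) of the returned tuple:
#
#     _split_float_format("-12.345")   # -> (7, 2, 3, True, False)
#     _split_float_format("1.5e-10")   # -> (7, 1, 1, False, True)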
def _set_column_val_limits(self, col):
"""
Sets the ``col.min`` and ``col.max`` column attributes,
taking into account columns with Null values.
"""
col.max = max(col)
col.min = min(col)
# ``max``/``min`` can return the masked constant for masked columns, so
# compare against an instance of MaskedConstant rather than the class itself.
if isinstance(col.max, np.ma.core.MaskedConstant):
col.max = None
if isinstance(col.min, np.ma.core.MaskedConstant):
col.min = None
def column_float_formatter(self, col):
"""
String formatter function for a column containing Float values.
Checks if the values in the given column are in Scientific notation,
by splitting the value string. It is assumed that the column either has
float values or Scientific notation.
A ``col.formatted_width`` attribute is added to the column. It is not added
if such an attribute is already present, say when the ``formats`` argument
is passed to the writer. A properly formatted format string is also added as
the ``col.format`` attribute.
Parameters
----------
col : A ``Table.Column`` object.
"""
# maxsize: maximum length of string containing the float value.
# maxent: maximum number of digits before the decimal point.
# maxdec: maximum number of digits after the decimal point.
# maxprec: maximum precision of the column values, sum of maxent and maxdec.
maxsize, maxprec, maxent, maxdec = 1, 0, 1, 0
sign = False
fformat = "F"
# Find maximum sized value in the col
for val in col.str_vals:
# Skip null values
if val is None or val == "":
continue
# Find format of the Float string
fmt = self._split_float_format(val)
# If value is in Scientific notation
if fmt[4] is True:
# if the previous column value was in normal Float format
# set maxsize, maxprec and maxdec to default.
if fformat == "F":
maxsize, maxprec, maxdec = 1, 0, 0
# Designate the column to be in Scientific notation.
fformat = "E"
else:
# Move to next column value if
# current value is not in Scientific notation
# but the column is designated as such because
# one of the previous values was.
if fformat == "E":
continue
if maxsize < fmt[0]:
maxsize = fmt[0]
if maxent < fmt[1]:
maxent = fmt[1]
if maxdec < fmt[2]:
maxdec = fmt[2]
if fmt[3]:
sign = True
if maxprec < fmt[1] + fmt[2]:
maxprec = fmt[1] + fmt[2]
if fformat == "E":
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = maxsize
if sign:
col.formatted_width += 1
# Number of digits after decimal is replaced by the precision
# for values in Scientific notation, when writing that Format.
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxprec)
col.format = str(col.formatted_width) + "." + str(maxdec) + "e"
else:
lead = ""
if (
getattr(col, "formatted_width", None) is None
): # If ``formats`` not passed.
col.formatted_width = maxent + maxdec + 1
if sign:
col.formatted_width += 1
elif col.format.startswith("0"):
# Keep leading zero, if already set in format - primarily for `seconds` columns
# in coordinates; may need extra case if this is to be also supported with `sign`.
lead = "0"
col.fortran_format = fformat + str(col.formatted_width) + "." + str(maxdec)
col.format = lead + col.fortran_format[1:] + "f"
def write_byte_by_byte(self):
"""
Writes the Byte-By-Byte description of the table.
Columns that are `astropy.coordinates.SkyCoord` or `astropy.time.TimeSeries`
objects or columns with values that are such objects are recognized as such,
and some predefined labels and descriptions are used for them.
See the Vizier MRT Standard documentation in the link below for more details
on these. An example Byte-By-Byte table is shown here.
See: https://vizier.unistra.fr/doc/catstd-3.1.htx
Example::
--------------------------------------------------------------------------------
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- names Description of names
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
25-31 E7.1 --- s [-9e+34/2.0] Description of s
33-35 I3 --- i [-30/67] Description of i
37-39 F3.1 --- sameF [5.0/5.0] Description of sameF
41-42 I2 --- sameI [20] Description of sameI
44-45 I2 h RAh Right Ascension (hour)
47-48 I2 min RAm Right Ascension (minute)
50-67 F18.15 s RAs Right Ascension (second)
69 A1 --- DE- Sign of Declination
70-71 I2 deg DEd Declination (degree)
73-74 I2 arcmin DEm Declination (arcmin)
76-91 F16.13 arcsec DEs Declination (arcsec)
--------------------------------------------------------------------------------
"""
# Get column widths
vals_list = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
vals_list.append(vals)
for i, col in enumerate(self.cols):
col.width = max(len(vals[i]) for vals in vals_list)
if self.start_line is not None:
col.width = max(col.width, len(col.info.name))
widths = [col.width for col in self.cols]
startb = 1 # Byte count starts at 1.
# Set default width of the Bytes count column of the Byte-By-Byte table.
# This ``byte_count_width`` value helps align byte counts with respect
# to the hyphen using a format string.
byte_count_width = len(str(sum(widths) + len(self.cols) - 1))
# Format string for Start Byte and End Byte
singlebfmt = "{:" + str(byte_count_width) + "d}"
fmtb = singlebfmt + "-" + singlebfmt
# Add trailing single whitespaces to Bytes column for better visibility.
singlebfmt += " "
fmtb += " "
# Set default width of Label and Description Byte-By-Byte columns.
max_label_width, max_descrip_size = 7, 16
bbb = Table(
names=["Bytes", "Format", "Units", "Label", "Explanations"], dtype=[str] * 5
)
# Iterate over the columns to write Byte-By-Byte rows.
for i, col in enumerate(self.cols):
# Check if column is MaskedColumn
col.has_null = isinstance(col, MaskedColumn)
if col.format is not None:
col.formatted_width = max(len(sval) for sval in col.str_vals)
# Set MRTColumn type, size and format.
if np.issubdtype(col.dtype, np.integer):
# Integer formatter
self._set_column_val_limits(col)
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = max(len(str(col.max)), len(str(col.min)))
col.fortran_format = "I" + str(col.formatted_width)
if col.format is None:
col.format = ">" + col.fortran_format[1:]
elif np.issubdtype(col.dtype, np.dtype(float).type):
# Float formatter
self._set_column_val_limits(col)
self.column_float_formatter(col)
else:
# String formatter, ``np.issubdtype(col.dtype, str)`` is ``True``.
dtype = col.dtype.str
if col.has_null:
mcol = col
mcol.fill_value = ""
coltmp = Column(mcol.filled(), dtype=str)
dtype = coltmp.dtype.str
# If ``formats`` not passed.
if getattr(col, "formatted_width", None) is None:
col.formatted_width = int(re.search(r"(\d+)$", dtype).group(1))
col.fortran_format = "A" + str(col.formatted_width)
col.format = str(col.formatted_width) + "s"
endb = col.formatted_width + startb - 1
# ``mixin`` columns converted to string valued columns will not have a name
# attribute. In those cases, an ``Unknown`` column label is used, indicating that
# such columns can be better formatted with some manipulation before calling
# the MRT writer.
if col.name is None:
col.name = "Unknown"
# Set column description.
if col.description is not None:
description = col.description
else:
description = "Description of " + col.name
# Set null flag in column description
nullflag = ""
if col.has_null:
nullflag = "?"
# Set column unit
if col.unit is not None:
col_unit = col.unit.to_string("cds")
elif col.name.lower().find("magnitude") > -1:
# ``col.unit`` can still be ``None``, if the unit of column values
# is ``Magnitude``, because ``astropy.units.Magnitude`` is actually a class.
# Unlike other units which are instances of ``astropy.units.Unit``,
# application of the ``Magnitude`` unit calculates the logarithm
# of the values. Thus, the only way to check for if the column values
# have ``Magnitude`` unit is to check the column name.
col_unit = "mag"
else:
col_unit = "---"
# Add col limit values to col description
lim_vals = ""
if (
col.min
and col.max
and not any(
x in col.name for x in ["RA", "DE", "LON", "LAT", "PLN", "PLT"]
)
):
# No col limit values for coordinate columns.
if col.fortran_format[0] == "I":
if (
abs(col.min) < MAX_COL_INTLIMIT
and abs(col.max) < MAX_COL_INTLIMIT
):
if col.min == col.max:
lim_vals = f"[{col.min}]"
else:
lim_vals = f"[{col.min}/{col.max}]"
elif col.fortran_format[0] in ("E", "F"):
lim_vals = (
f"[{floor(col.min * 100) / 100.}/{ceil(col.max * 100) / 100.}]"
)
if lim_vals != "" or nullflag != "":
description = f"{lim_vals}{nullflag} {description}"
# Find the maximum label and description column widths.
if len(col.name) > max_label_width:
max_label_width = len(col.name)
if len(description) > max_descrip_size:
max_descrip_size = len(description)
# Add a row for the Sign of Declination in the bbb table
if col.name == "DEd":
bbb.add_row(
[
singlebfmt.format(startb),
"A1",
"---",
"DE-",
"Sign of Declination",
]
)
col.fortran_format = "I2"
startb += 1
# Add Byte-By-Byte row to bbb table
bbb.add_row(
[
singlebfmt.format(startb)
if startb == endb
else fmtb.format(startb, endb),
"" if col.fortran_format is None else col.fortran_format,
col_unit,
"" if col.name is None else col.name,
description,
]
)
startb = endb + 2
# Properly format bbb columns
bbblines = StringIO()
bbb.write(
bbblines,
format="ascii.fixed_width_no_header",
delimiter=" ",
bookend=False,
delimiter_pad=None,
formats={
"Format": "<6s",
"Units": "<6s",
"Label": "<" + str(max_label_width) + "s",
"Explanations": "" + str(max_descrip_size) + "s",
},
)
# Get formatted bbb lines
bbblines = bbblines.getvalue().splitlines()
# ``nsplit`` is the number of whitespaces to prefix to long description
# lines in order to wrap them. It is the sum of the widths of the
# previous 4 columns plus the number of single spacing between them.
# The hyphen in the Bytes column is also counted.
nsplit = byte_count_width * 2 + 1 + 12 + max_label_width + 4
# Wrap line if it is too long
buff = ""
for newline in bbblines:
if len(newline) > MAX_SIZE_README_LINE:
buff += ("\n").join(
wrap(
newline,
subsequent_indent=" " * nsplit,
width=MAX_SIZE_README_LINE,
)
)
buff += "\n"
else:
buff += newline + "\n"
# Last value of ``endb`` is the sum of column widths after formatting.
self.linewidth = endb
# Remove the last extra newline character from Byte-By-Byte.
buff = buff[:-1]
return buff
def write(self, lines):
"""
Writes the Header of the MRT table, aka ReadMe, which
also contains the Byte-By-Byte description of the table.
"""
from astropy.coordinates import SkyCoord
# Recognised ``SkyCoord.name`` forms with their default column names (helio* require SunPy).
coord_systems = {
"galactic": ("GLAT", "GLON", "b", "l"),
"ecliptic": ("ELAT", "ELON", "lat", "lon"), # 'geocentric*ecliptic'
"heliographic": ("HLAT", "HLON", "lat", "lon"), # '_carrington|stonyhurst'
"helioprojective": ("HPLT", "HPLN", "Ty", "Tx"),
}
eqtnames = ["RAh", "RAm", "RAs", "DEd", "DEm", "DEs"]
# list to store indices of columns that are modified.
to_pop = []
# For columns that are instances of ``SkyCoord`` and other ``mixin`` columns
# or whose values are objects of these classes.
for i, col in enumerate(self.cols):
# If col is a ``Column`` object but its values are ``SkyCoord`` objects,
# convert the whole column to ``SkyCoord`` object, which helps in applying
# SkyCoord methods directly.
if not isinstance(col, SkyCoord) and isinstance(col[0], SkyCoord):
try:
col = SkyCoord(col)
except (ValueError, TypeError):
# If only the first value of the column is a ``SkyCoord`` object,
# the column cannot be converted to a ``SkyCoord`` object.
# These columns are converted to ``Column`` object and then converted
# to string valued column.
if not isinstance(col, Column):
col = Column(col)
col = Column([str(val) for val in col])
self.cols[i] = col
continue
# Replace single ``SkyCoord`` column by its coordinate components if no coordinate
# columns of the corresponding type exist yet.
if isinstance(col, SkyCoord):
# If coordinates are given in RA/DEC, divide them into hour/deg,
# minute/arcminute, second/arcsecond columns.
if (
"ra" in col.representation_component_names.keys()
and len(set(eqtnames) - set(self.colnames)) == 6
):
ra_c, dec_c = col.ra.hms, col.dec.dms
coords = [
ra_c.h.round().astype("i1"),
ra_c.m.round().astype("i1"),
ra_c.s,
dec_c.d.round().astype("i1"),
dec_c.m.round().astype("i1"),
dec_c.s,
]
coord_units = [u.h, u.min, u.second, u.deg, u.arcmin, u.arcsec]
coord_descrip = [
"Right Ascension (hour)",
"Right Ascension (minute)",
"Right Ascension (second)",
"Declination (degree)",
"Declination (arcmin)",
"Declination (arcsec)",
]
for coord, name, coord_unit, descrip in zip(
coords, eqtnames, coord_units, coord_descrip
):
# Have Sign of Declination only in the DEd column.
if name in ["DEm", "DEs"]:
coord_col = Column(
list(np.abs(coord)),
name=name,
unit=coord_unit,
description=descrip,
)
else:
coord_col = Column(
list(coord),
name=name,
unit=coord_unit,
description=descrip,
)
# Set default number of digits after decimal point for the
# second values, and deg-min to (signed) 2-digit zero-padded integer.
if name == "RAs":
coord_col.format = "013.10f"
elif name == "DEs":
coord_col.format = "012.9f"
elif name == "RAh":
coord_col.format = "2d"
elif name == "DEd":
coord_col.format = "+03d"
elif name.startswith(("RA", "DE")):
coord_col.format = "02d"
self.cols.append(coord_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# For all other coordinate types, simply divide into two columns
# for latitude and longitude respectively, with the unit used as-is.
else:
frminfo = ""
for frame, latlon in coord_systems.items():
if (
frame in col.name
and len(set(latlon[:2]) - set(self.colnames)) == 2
):
if frame != col.name:
frminfo = f" ({col.name})"
lon_col = Column(
getattr(col, latlon[3]),
name=latlon[1],
description=f"{frame.capitalize()} Longitude{frminfo}",
unit=col.representation_component_units[latlon[3]],
format=".12f",
)
lat_col = Column(
getattr(col, latlon[2]),
name=latlon[0],
description=f"{frame.capitalize()} Latitude{frminfo}",
unit=col.representation_component_units[latlon[2]],
format="+.12f",
)
self.cols.append(lon_col)
self.cols.append(lat_col)
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``SkyCoord`` columns that are not in the above three
# representations to string valued columns. Those could either be types not
# supported yet (e.g. 'helioprojective'), or already present and converted.
# If there were any extra ``SkyCoord`` columns of one kind after the first one,
# then their decomposition into their component columns has been skipped.
# This is done in order to not create duplicate component columns.
# Explicit renaming of the extra coordinate component columns by appending some
# suffix to their name, so as to distinguish them, is not yet implemented.
if i not in to_pop:
warnings.warn(
f"Coordinate system of type '{col.name}' already stored in"
" table as CDS/MRT-syle columns or of unrecognized type. So"
f" column {i} is being skipped with designation of a string"
f" valued column `{self.colnames[i]}`.",
UserWarning,
)
self.cols.append(Column(col.to_string(), name=self.colnames[i]))
to_pop.append(i) # Delete original ``SkyCoord`` column.
# Convert all other ``mixin`` columns to ``Column`` objects.
# Parsing these may still lead to errors!
elif not isinstance(col, Column):
col = Column(col)
# If column values are ``object`` types, convert them to string.
if np.issubdtype(col.dtype, np.dtype(object).type):
col = Column([str(val) for val in col])
self.cols[i] = col
# Delete original ``SkyCoord`` columns, if there were any.
for i in to_pop[::-1]:
self.cols.pop(i)
# Check for any left over extra coordinate columns.
if any(x in self.colnames for x in ["RAh", "DEd", "ELON", "GLAT"]):
# At this point any extra ``SkyCoord`` columns should have been converted to string
# valued columns, together with issuance of a warning, by the coordinate parser above.
# This test is just left here as a safeguard.
for i, col in enumerate(self.cols):
if isinstance(col, SkyCoord):
self.cols[i] = Column(col.to_string(), name=self.colnames[i])
message = (
"Table already has coordinate system in CDS/MRT-syle columns. "
f"So column {i} should have been replaced already with "
f"a string valued column `{self.colnames[i]}`."
)
raise core.InconsistentTableError(message)
# Get Byte-By-Byte description and fill the template
bbb_template = Template("\n".join(BYTE_BY_BYTE_TEMPLATE))
byte_by_byte = bbb_template.substitute(
{"file": "table.dat", "bytebybyte": self.write_byte_by_byte()}
)
# Fill up the full ReadMe
rm_template = Template("\n".join(MRT_TEMPLATE))
readme_filled = rm_template.substitute({"bytebybyte": byte_by_byte})
lines.append(readme_filled)
class MrtData(cds.CdsData):
"""MRT table data reader."""
_subfmt = "MRT"
splitter_class = MrtSplitter
def write(self, lines):
self.splitter.delimiter = " "
fixedwidth.FixedWidthData.write(self, lines)
class Mrt(core.BaseReader):
"""AAS MRT (Machine-Readable Table) format table.
**Reading**
::
>>> from astropy.io import ascii
>>> table = ascii.read('data.mrt', format='mrt')
**Writing**
Use ``ascii.write(table, 'data.mrt', format='mrt')`` to write tables to
Machine Readable Table (MRT) format.
Note that the metadata of the table, apart from units, column names and
description, will not be written. These have to be filled in by hand later.
See also: :ref:`cds_mrt_format`.
Caveats:
* The Units and Explanations are available in the column ``unit`` and
``description`` attributes, respectively.
* The other metadata defined by this format is not available in the output table.
"""
_format_name = "mrt"
_io_registry_format_aliases = ["mrt"]
_io_registry_can_write = True
_description = "MRT format table"
data_class = MrtData
header_class = MrtHeader
def write(self, table=None):
# Construct for writing empty table is not yet done.
if len(table) == 0:
raise NotImplementedError
self.data.header = self.header
self.header.position_line = None
self.header.start_line = None
# Create a copy of the ``table``, so that the copy gets modified and
# written to the file, while the original table remains as it is.
table = table.copy()
return super().write(table)
|
576bece0670c49d2966ccd668f3601650a46d21036da0c27cccca3d2dff1336f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {
"AA": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline",
"data_end": r"\hline",
},
"doublelines": {
"tabletype": "table",
"header_start": r"\hline \hline",
"header_end": r"\hline\hline",
"data_end": r"\hline\hline",
},
"template": {
"tabletype": "tabletype",
"caption": "caption",
"tablealign": "tablealign",
"col_align": "col_align",
"preamble": "preamble",
"header_start": "header_start",
"header_end": "header_end",
"data_start": "data_start",
"data_end": "data_end",
"tablefoot": "tablefoot",
"units": {"col1": "unit of col1", "col2": "unit of col2"},
},
}
RE_COMMENT = re.compile(r"(?<!\\)%") # % character but not \%
def add_dictval_to_list(adict, key, alist):
"""
Add a value from a dictionary to a list.
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
"""
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
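# Worked example (illustrative sketch): a string value is appended, a list
# value is extended.
#
#     out = []
#     add_dictval_to_list({'preamble': [r'\small', r'\centering']}, 'preamble', out)
#     # out -> ['\\small', '\\centering']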
def find_latex_line(lines, latex):
"""
Find the first line which matches a pattern.
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None, if no match was found
"""
re_string = re.compile(latex.replace("\\", "\\\\"))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
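# Worked example (illustrative sketch):
#
#     find_latex_line([r'\caption{t}', r'\begin{tabular}{cc}'], r'\begin{tabular}')
#     # -> 1  (index of the first matching line; None if no line matches)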
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
"""Split LaTeX table data. Default delimiter is `&`."""
delimiter = "&"
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r"\\"):
lines[-1] = last_line + r"\\"
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line.
"""
line = RE_COMMENT.split(line)[0].strip()
if not line.endswith(r"\\"):
raise core.InconsistentTableError(
r"Lines in LaTeX table have to end with \\"
)
return line.removesuffix(r"\\")
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == "{") and (val[-1] == "}"):
val = val[1:-1]
return val
def join(self, vals):
"""Join values together and add a few extra spaces for readability."""
delimiter = " " + self.delimiter + " "
return delimiter.join(x.strip() for x in vals) + r" \\"
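# Worked example (illustrative sketch):
#
#     LatexSplitter().join(['1.2', 'hello'])
#     # -> r'1.2 & hello \\'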
class LatexHeader(core.BaseHeader):
"""Class to read the header of Latex Tables."""
header_start = r"\begin{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format="latex_inline")
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
if self.latex["tabletype"] is not None:
lines.append(r"\begin{" + self.latex["tabletype"] + r"}" + align)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\caption{" + self.latex["caption"] + "}")
lines.append(self.header_start + r"{" + self.latex["col_align"] + r"}")
add_dictval_to_list(self.latex, "header_start", lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
lines.append(
self.splitter.join([units.get(name, " ") for name in self.colnames])
)
add_dictval_to_list(self.latex, "header_end", lines)
class LatexData(core.BaseData):
"""Class to read the data in LaTeX tables."""
data_start = None
data_end = r"\end{tabular}"
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r"Could not find table start")
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, "data_start", lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, "data_end", lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
if self.latex["tabletype"] is not None:
lines.append(r"\end{" + self.latex["tabletype"] + "}")
class Latex(core.BaseReader):
r"""LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
command; instead the focus is to generate syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
Reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
When writing a LaTeX table, some keywords can customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
``ascii.latex.latexdicts``. The following generates a table in the
style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
"""
_format_name = "latex"
_io_registry_format_aliases = ["latex"]
_io_registry_suffix = ".tex"
_description = "LaTeX table"
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(
self,
ignore_latex_commands=[
"hline",
"vspace",
"tableline",
"toprule",
"midrule",
"bottomrule",
],
latexdict={},
caption="",
col_align=None,
):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex["tabletype"] = "table"
self.latex.update(latexdict)
if caption:
self.latex["caption"] = caption
if col_align:
self.latex["col_align"] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = "%|" + "|".join(
[r"\\" + command for command in self.ignore_latex_commands]
)
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
class AASTexHeaderSplitter(LatexSplitter):
r"""Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
"""
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead."""
line = line.split("%")[0]
line = line.replace(r"\tablehead", "")
line = line.strip()
if (line[0] == "{") and (line[-1] == "}"):
line = line[1:-1]
else:
raise core.InconsistentTableError(r"\tablehead is missing {}")
return line.replace(r"\colhead", "")
def join(self, vals):
return " & ".join([r"\colhead{" + str(x) + "}" for x in vals])
class AASTexHeader(LatexHeader):
r"""In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
"""
header_start = r"\tablehead"
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r"\tablehead")
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
lines.append(
r"\begin{"
+ self.latex["tabletype"]
+ r"}{"
+ self.latex["col_align"]
+ r"}"
+ align
)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\tablecaption{" + self.latex["caption"] + "}")
tablehead = " & ".join([r"\colhead{" + name + "}" for name in self.colnames])
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
tablehead += r"\\ " + self.splitter.join(
[units.get(name, " ") for name in self.colnames]
)
lines.append(r"\tablehead{" + tablehead + "}")
class AASTexData(LatexData):
r"""In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`."""
data_start = r"\startdata"
data_end = r"\enddata"
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
# Remove trailing space(s) and the ``\\`` appended by the splitter, which
# would otherwise create an extra new line at the end.
if len(lines) > lines_length_initial:
lines[-1] = re.sub(r"\s* \\ \\ \s* $", "", lines[-1], flags=re.VERBOSE)
lines.append(self.data_end)
add_dictval_to_list(self.latex, "tablefoot", lines)
lines.append(r"\end{" + self.latex["tabletype"] + r"}")
class AASTex(Latex):
"""AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
"""
_format_name = "aastex"
_io_registry_format_aliases = ["aastex"]
_io_registry_suffix = "" # AASTex inherits from Latex, so override this class attr
_description = "AASTeX deluxetable used for AAS journals"
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (("latexdict" in kwargs) and ("tabletype" in kwargs["latexdict"])):
self.latex["tabletype"] = "deluxetable"
|
c0ac9cebacf3c6ef8e38cc22f19e00ddec3c2db718d7f577a3278019ce532cbd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ipac.py:
Classes to read IPAC table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from collections import OrderedDict, defaultdict
from textwrap import wrap
from warnings import warn
from astropy.table.pprint import get_auto_format_func
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core, fixedwidth
class IpacFormatErrorDBMS(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html",
)
class IpacFormatError(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html",
)
class IpacHeaderSplitter(core.BaseSplitter):
"""Splitter for Ipac Headers.
This splitter is similar to its parent when reading, but supports a
fixed width format (as required for Ipac table headers) for writing.
"""
process_line = None
process_val = None
delimiter = "|"
delimiter_pad = ""
skipinitialspace = False
comment = r"\s*\\"
write_comment = r"\\"
col_starts = None
col_ends = None
def join(self, vals, widths):
pad = self.delimiter_pad or ""
delimiter = self.delimiter or ""
padded_delim = pad + delimiter + pad
bookend_left = delimiter + pad
bookend_right = pad + delimiter
vals = [" " * (width - len(val)) + val for val, width in zip(vals, widths)]
return bookend_left + padded_delim.join(vals) + bookend_right
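# Rough illustration of the fixed-width join above (the widths are assumed for
# the example): join(['ra', 'dec'], widths=[6, 6]) returns '|    ra|   dec|',
# i.e. each value is right-justified to its column width and the whole row is
# bookended by the '|' delimiter.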
class IpacHeader(fixedwidth.FixedWidthHeader):
"""IPAC table header."""
splitter_class = IpacHeaderSplitter
# Defined ordered list of possible types. Ordering is needed to
# distinguish between "d" (double) and "da" (date) as defined by
# the IPAC standard for abbreviations. This gets used in get_col_type().
col_type_list = (
("integer", core.IntType),
("long", core.IntType),
("double", core.FloatType),
("float", core.FloatType),
("real", core.FloatType),
("char", core.StrType),
("date", core.StrType),
)
definition = "ignore"
start_line = None
def process_lines(self, lines):
"""Generator to yield IPAC header lines, i.e. those starting and ending with
delimiter character (with trailing whitespace stripped).
"""
delim = self.splitter.delimiter
for line in lines:
line = line.rstrip()
if line.startswith(delim) and line.endswith(delim):
yield line.strip(delim)
def update_meta(self, lines, meta):
"""
Extract table-level comments and keywords for IPAC table. See:
https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw.
"""
def process_keyword_value(val):
"""
Take a string value and convert to float, int or str, and strip quotes
as needed.
"""
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
# Strip leading/trailing quote. The spec says that a matched pair
# of quotes is required, but this code will allow a non-quoted value.
for quote in ('"', "'"):
if val.startswith(quote) and val.endswith(quote):
val = val[1:-1]
break
return val
table_meta = meta["table"]
table_meta["comments"] = []
table_meta["keywords"] = OrderedDict()
keywords = table_meta["keywords"]
# fmt: off
re_keyword = re.compile(
r'\\'
r'(?P<name> \w+)'
r'\s* = (?P<value> .+) $',
re.VERBOSE
)
# fmt: on
for line in lines:
# Keywords and comments start with "\". Once the first non-slash
# line is seen then bail out.
if not line.startswith("\\"):
break
m = re_keyword.match(line)
if m:
name = m.group("name")
val = process_keyword_value(m.group("value"))
# IPAC allows for continuation keywords, e.g.
# \SQL = 'WHERE '
# \SQL = 'SELECT (25 column names follow in next row.)'
if name in keywords and isinstance(val, str):
prev_val = keywords[name]["value"]
if isinstance(prev_val, str):
val = prev_val + val
keywords[name] = {"value": val}
else:
# Comment is required to start with "\ "
if line.startswith("\\ "):
val = line[2:].strip()
if val:
table_meta["comments"].append(val)
def get_col_type(self, col):
for col_type_key, col_type in self.col_type_list:
if col_type_key.startswith(col.raw_type.lower()):
return col_type
else:
raise ValueError(
f'Unknown data type ""{col.raw_type}"" for column "{col.name}"'
)
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
# generator returning valid header lines
header_lines = self.process_lines(lines)
header_vals = list(self.splitter(header_lines))
if len(header_vals) == 0:
raise ValueError(
"At least one header line beginning and ending with delimiter required"
)
elif len(header_vals) > 4:
raise ValueError("More than four header lines were found")
# Generate column definitions
cols = []
start = 1
for i, name in enumerate(header_vals[0]):
col = core.Column(name=name.strip(" -"))
col.start = start
col.end = start + len(name)
if len(header_vals) > 1:
col.raw_type = header_vals[1][i].strip(" -")
col.type = self.get_col_type(col)
if len(header_vals) > 2:
col.unit = header_vals[2][i].strip() or None # Can't strip dashes here
if len(header_vals) > 3:
# The IPAC null value corresponds to the io.ascii bad_value.
# In this case there isn't a fill_value defined, so just put
# in the minimal entry that is sure to convert properly to the
# required type.
#
# Strip spaces but not dashes (not allowed in NULL row per
# https://github.com/astropy/astropy/issues/361)
null = header_vals[3][i].strip()
fillval = "" if issubclass(col.type, core.StrType) else "0"
self.data.fill_values.append((null, fillval, col.name))
start = col.end + 1
cols.append(col)
# Correct column start/end based on definition
if self.ipac_definition == "right":
col.start -= 1
elif self.ipac_definition == "left":
col.end += 1
self.names = [x.name for x in cols]
self.cols = cols
def str_vals(self):
if self.DBMS:
IpacFormatE = IpacFormatErrorDBMS
else:
IpacFormatE = IpacFormatError
namelist = self.colnames
if self.DBMS:
countnamelist = defaultdict(int)
for name in self.colnames:
countnamelist[name.lower()] += 1
doublenames = [x for x in countnamelist if countnamelist[x] > 1]
if doublenames != []:
raise IpacFormatE(
"IPAC DBMS tables are not case sensitive. "
f"This causes duplicate column names: {doublenames}"
)
for name in namelist:
m = re.match(r"\w+", name)
if m.end() != len(name):
raise IpacFormatE(
f"{name} - Only alphanumeric characters and _ "
"are allowed in column names."
)
if self.DBMS and not (name[0].isalpha() or (name[0] == "_")):
raise IpacFormatE(f"Column name cannot start with numbers: {name}")
if self.DBMS:
if name in ["x", "y", "z", "X", "Y", "Z"]:
raise IpacFormatE(
f"{name} - x, y, z, X, Y, Z are reserved names and "
"cannot be used as column names."
)
if len(name) > 16:
raise IpacFormatE(
f"{name} - Maximum length for column name is 16 characters"
)
else:
if len(name) > 40:
raise IpacFormatE(
f"{name} - Maximum length for column name is 40 characters."
)
dtypelist = []
unitlist = []
nullist = []
for col in self.cols:
col_dtype = col.info.dtype
col_unit = col.info.unit
col_format = col.info.format
if col_dtype.kind in ["i", "u"]:
if col_dtype.itemsize <= 2:
dtypelist.append("int")
else:
dtypelist.append("long")
elif col_dtype.kind == "f":
if col_dtype.itemsize <= 4:
dtypelist.append("float")
else:
dtypelist.append("double")
else:
dtypelist.append("char")
if col_unit is None:
unitlist.append("")
else:
unitlist.append(str(col.info.unit))
# This may be incompatible with mixin columns
null = col.fill_values[core.masked]
try:
auto_format_func = get_auto_format_func(col)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
nullist.append((format_func(col_format, null)).strip())
except Exception:
# It is possible that null and the column values have different
# data types (e.g. a numeric column with null = 'null', i.e. a string).
# This could cause all kinds of exceptions, so a catch all
# block is needed here
nullist.append(str(null).strip())
return [namelist, dtypelist, unitlist, nullist]
def write(self, lines, widths):
"""Write header.
The width of each column is determined in Ipac.write. Writing the header
must be delayed until that time.
This function is called from there, once the width information is
available.
"""
for vals in self.str_vals():
lines.append(self.splitter.join(vals, widths))
return lines
class IpacDataSplitter(fixedwidth.FixedWidthSplitter):
delimiter = " "
delimiter_pad = ""
bookend = True
class IpacData(fixedwidth.FixedWidthData):
"""IPAC table data reader."""
comment = r"[|\\]"
start_line = 0
splitter_class = IpacDataSplitter
fill_values = [(core.masked, "null")]
def write(self, lines, widths, vals_list):
"""IPAC writer, modified from FixedWidth writer."""
for vals in vals_list:
lines.append(self.splitter.join(vals, widths))
return lines
class Ipac(basic.Basic):
r"""IPAC format table.
See: https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html
Example::
\\name=value
\\ Comment
| column1 | column2 | column3 | column4 | column5 |
| double | double | int | double | char |
| unit | unit | unit | unit | unit |
| null | null | null | null | null |
2.0978 29.09056 73765 2.06000 B8IVpMnHg
Or::
|-----ra---|----dec---|---sao---|------v---|----sptype--------|
2.09708 29.09056 73765 2.06000 B8IVpMnHg
The comments and keywords defined in the header are available via the output
table ``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/data/ipac.dat')
>>> data = ascii.read(filename)
>>> print(data.meta['comments'])
['This is an example of a valid comment']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'])
...
intval 1
floatval 2300.0
date Wed Sp 20 09:48:36 1995
key_continue IPAC keywords can continue across lines
Note that there are different conventions for characters occurring below the
position of the ``|`` symbol in IPAC tables. By default, any character
below a ``|`` will be ignored (since this is the current standard),
but if you need to read files that assume characters below the ``|``
symbols belong to the column before or after the ``|``, you can specify
``definition='left'`` or ``definition='right'`` respectively when reading
the table (the default is ``definition='ignore'``). The following examples
demonstrate the different conventions:
* ``definition='ignore'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='left'``::
| ra | dec |
| float | float |
1.2345 6.7890
* ``definition='right'``::
| ra | dec |
| float | float |
1.2345 6.7890
IPAC tables can specify a null value in the header that is shown in place
of missing or bad data. On writing, this value defaults to ``null``.
To specify a different null value, use the ``fill_values`` option to
replace masked values with a string or number of your choice as
described in :ref:`astropy:io_ascii_write_parameters`::
>>> from astropy.io.ascii import masked
>>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
>>> ascii.write(data, format='ipac', fill_values=fill)
\ This is an example of a valid comment
...
|          ra|         dec|      sai|          v2|            sptype|
|      double|      double|     long|      double|              char|
|        unit|        unit|     unit|        unit|              ergs|
|         N/A|        null|     null|        null|              -999|
          N/A     29.09056      null         2.06               -999
 2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
When writing a table with a column of integers, the data type is output
as ``int`` when the column ``dtype.itemsize`` is less than or equal to 2;
otherwise the data type is ``long``. For a column of floating-point values,
the data type is ``float`` when ``dtype.itemsize`` is less than or equal
to 4; otherwise the data type is ``double``.
Parameters
----------
definition : str, optional
Specify the convention for characters in the data table that occur
directly below the pipe (``|``) symbol in the header column definition:
* 'ignore' - Any character beneath a pipe symbol is ignored (default)
* 'right' - Character is associated with the column to the right
* 'left' - Character is associated with the column to the left
DBMS : bool, optional
If true, this verifies that written tables adhere (semantically)
to the `IPAC/DBMS
<https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
definition of IPAC tables. If 'False' it only checks for the (less strict)
`IPAC <https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
definition.
"""
_format_name = "ipac"
_io_registry_format_aliases = ["ipac"]
_io_registry_can_write = True
_description = "IPAC format table"
data_class = IpacData
header_class = IpacHeader
def __init__(self, definition="ignore", DBMS=False):
super().__init__()
# Usually the header is not defined in __init__, but here it needs a keyword
if definition in ["ignore", "left", "right"]:
self.header.ipac_definition = definition
else:
raise ValueError("definition should be one of ignore/left/right")
self.header.DBMS = DBMS
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Set a default null value for all columns by appending it at the end of
# fill_values, which is the position with the lowest priority.
# We have to do it this late, because the fill_value
# defined in the class can be overwritten by ui.write
self.data.fill_values.append((core.masked, "null"))
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, self.guessing)
core._apply_include_exclude_names(
table, self.names, self.include_names, self.exclude_names
)
# Check that table has only 1-d columns.
self._check_multidim_table(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
# Write header and data to lines list
lines = []
# Write meta information
if "comments" in table.meta:
for comment in table.meta["comments"]:
if len(str(comment)) > 78:
warn(
"Comment string > 78 characters was automatically wrapped.",
AstropyUserWarning,
)
for line in wrap(
str(comment), 80, initial_indent="\\ ", subsequent_indent="\\ "
):
lines.append(line)
if "keywords" in table.meta:
keydict = table.meta["keywords"]
for keyword in keydict:
try:
val = keydict[keyword]["value"]
lines.append(f"\\{keyword.strip()}={val!r}")
# meta is not standardized: Catch some common Errors.
except TypeError:
warn(
f"Table metadata keyword {keyword} has been skipped. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
ignored_keys = [
key for key in table.meta if key not in ("keywords", "comments")
]
if any(ignored_keys):
warn(
f"Table metadata keyword(s) {ignored_keys} were not written. "
"IPAC metadata must be in the form {{'keywords':"
"{{'keyword': {{'value': value}} }}",
AstropyUserWarning,
)
# Usually, this is done in data.write, but since the header is written
# first, we need that here.
self.data._set_fill_values(self.data.cols)
# get header and data as strings to find width of each column
for i, col in enumerate(table.columns.values()):
col.headwidth = max(len(vals[i]) for vals in self.header.str_vals())
# keep data_str_vals because they take some time to make
data_str_vals = []
col_str_iters = self.data.str_vals()
for vals in zip(*col_str_iters):
data_str_vals.append(vals)
for i, col in enumerate(table.columns.values()):
# FIXME: In Python 3.4, use max([], default=0).
# See: https://docs.python.org/3/library/functions.html#max
if data_str_vals:
col.width = max(len(vals[i]) for vals in data_str_vals)
else:
col.width = 0
widths = [max(col.width, col.headwidth) for col in table.columns.values()]
# then write table
self.header.write(lines, widths)
self.data.write(lines, widths, data_str_vals)
return lines
|
985a15a295421c05a7bce2a6022ecc82ac93bbf581da53a3442bbf49fb78ad30 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import collections
import contextlib
import copy
import os
import re
import sys
import time
import warnings
from io import StringIO
import numpy as np
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import (
basic,
cds,
core,
cparser,
daophot,
ecsv,
fastbasic,
fixedwidth,
html,
ipac,
latex,
mrt,
rst,
sextractor,
)
from .docs import READ_KWARG_TYPES, WRITE_KWARG_TYPES
_read_trace = []
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
table = table[: i + 1]
break
table = os.linesep.join(table)
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(
r"( http[s]? | ftp | file ) :// .+ \.htm[l]?$",
table,
re.IGNORECASE | re.VERBOSE,
):
return True
# Filename ending in .htm or .html which exists
if re.search(r"\.htm[l]?$", table[-5:], re.IGNORECASE) and os.path.exists(
os.path.expanduser(table)
):
return True
# Table starts with HTML document type declaration
if re.match(r"\s* <! \s* DOCTYPE \s* HTML", table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(
re.search(rf"< \s* {element} [^>]* >", table, re.IGNORECASE | re.VERBOSE)
for element in ("table", "tr", "td")
):
return True
return False
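# Informal illustration of the heuristic above (inputs are made up for the
# example): a string like 'https://example.org/table.html' is treated as probably
# HTML because it is a URL ending in .html, whereas a plain whitespace-delimited
# string such as 'a b c\n1 2 3' is not.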
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read().
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dict of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : tuple, list of tuple
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader["enable"] == "force":
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f"Cannot supply both format and {label} keywords")
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError(
"ASCII format {!r} not in allowed list {}".format(
format, sorted(core.FORMAT_CLASSES)
)
)
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get("fast_reader", True))
if isinstance(fast_reader, dict):
fast_reader.setdefault("enable", "force")
else:
fast_reader = {"enable": fast_reader}
return fast_reader
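# For illustration (assumed inputs): fast_reader=True becomes {'enable': True},
# while fast_reader={'chunk_size': 100} becomes
# {'chunk_size': 100, 'enable': 'force'}, because supplying a dict implies that
# the fast reader must be used.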
def _validate_read_write_kwargs(read_write, **kwargs):
"""Validate types of keyword arg inputs to read() or write()."""
def is_ducktype(val, cls):
"""Check if ``val`` is an instance of ``cls`` or "seems" like one:
``cls(val) == val`` does not raise an exception and is `True`. In
this way you can pass in ``np.int16(2)`` and have that count as `int`.
This has a special-case of ``cls`` being 'list-like', meaning it is
an iterable but not a string.
"""
if cls == "list-like":
ok = not isinstance(val, str) and isinstance(val, collections.abc.Iterable)
else:
ok = isinstance(val, cls)
if not ok:
# See if ``val`` walks and quacks like a ``cls``.
try:
new_val = cls(val)
assert new_val == val
except Exception:
ok = False
else:
ok = True
return ok
kwarg_types = READ_KWARG_TYPES if read_write == "read" else WRITE_KWARG_TYPES
for arg, val in kwargs.items():
# Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
# This reflects that some readers allow additional arguments that may not
# be well-specified, e.g. ```__init__(self, **kwargs)`` is an option.
if arg not in kwarg_types or val is None:
continue
# Single type or tuple of types for this arg (like isinstance())
types = kwarg_types[arg]
err_msg = (
f"{read_write}() argument '{arg}' must be a "
f"{types} object, got {type(val)} instead"
)
# Force `types` to be a tuple for the any() check below
if not isinstance(types, tuple):
types = (types,)
if not any(is_ducktype(val, cls) for cls in types):
raise TypeError(err_msg)
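# As an example of the duck-typing above (illustrative values): np.int16(2)
# counts as an ``int`` because int(np.int16(2)) == np.int16(2), so numpy scalars
# pass the validation for keyword arguments declared as ``int``.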
def _expand_user_if_path(argument):
if isinstance(argument, (str, bytes, os.PathLike)):
# For the `read()` method, a `str` input can be either a file path or
# the table data itself. File names for io.ascii cannot have newlines
# in them and io.ascii does not accept table data as `bytes`, so we can
# attempt to detect data strings like this.
is_str_data = isinstance(argument, str) and (
"\n" in argument or "\r" in argument
)
if not is_str_data:
# Remain conservative in expanding the presumed-path
ex_user = os.path.expanduser(argument)
if os.path.exists(ex_user):
argument = ex_user
return argument
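# Sketch of the intended behavior (paths are illustrative): a plain string such
# as '~/data/table.dat' is expanded with os.path.expanduser only if the expanded
# path actually exists, while a string containing a newline (inline table data)
# is always returned unchanged.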
def read(table, guess=None, **kwargs):
# This is the final output from reading. Static analysis indicates the reading
# logic (which is indeed complex) might not define `dat`, thus do so here.
dat = None
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
_validate_read_write_kwargs("read", **kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs["fast_reader"] = fast_reader
if fast_reader["enable"] and fast_reader.get("chunk_size"):
return _read_in_chunks(table, **kwargs)
if "fill_values" not in kwargs:
kwargs["fill_values"] = [("", "0")]
# If an Outputter is supplied in kwargs that will take precedence.
if (
"Outputter" in kwargs
): # user specified Outputter, not supported for fast reading
fast_reader["enable"] = False
format = kwargs.get("format")
# Dictionary arguments are passed by reference by default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs["fast_reader"] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get("Reader"), "Reader")
if Reader is not None:
new_kwargs["Reader"] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if "format" in new_kwargs:
del new_kwargs["format"]
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs["guess_html"] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if "readme" not in new_kwargs:
encoding = kwargs.get("encoding")
try:
table = _expand_user_if_path(table)
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r"[\r\n]", table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs["guess_html"]:
new_kwargs["guess_html"] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
table = _expand_user_if_path(table)
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader["enable"] and f"fast_{format}" in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(fast_kwargs),
"Reader": fast_reader_rdr.__class__,
"status": "Success with fast reader (no guessing)",
}
)
except (
core.ParameterError,
cparser.CParserError,
UnicodeEncodeError,
) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader["enable"] == "force":
raise core.InconsistentTableError(
f"fast reader {fast_reader_rdr.__class__} exception: {err}"
)
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": (
"Success with slow reader after failing"
" with fast (no guessing)"
),
}
)
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(new_kwargs),
"Reader": reader.__class__,
"status": "Success with specified Reader class (no guessing)",
}
)
# Static analysis (pyright) indicates `dat` might be left undefined, so just
# to be sure define it at the beginning and check here.
if dat is None:
raise RuntimeError(
"read() function failed due to code logic error, "
"please report this bug on github"
)
return dat
read.__doc__ = core.READ_DOCSTRING
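# A hedged usage sketch of the read() flow above (the file name and format are
# assumptions for the example): with guessing enabled the guess list is tried
# first; with guess=False the reader is selected directly from ``format``.
#
# >>> from astropy.io import ascii
# >>> tbl = ascii.read('observations.tbl', guess=False, format='ipac')  # doctest: +SKIP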
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of the kwargs for all failed guesses
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (
fast_reader["enable"]
and format is not None
and f"fast_{format}" in core.FAST_CLASSES
):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs["Reader"] = core.FAST_CLASSES[f"fast_{format}"]
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get("fast_reader")
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (
fast_reader["enable"] is False
and guess_kwargs["Reader"] in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: reader only available in fast version",
"dt": f"{0.0:.3f} ms",
}
)
continue
# If user required a fast reader then skip all non-fast readers
if (
fast_reader["enable"] == "force"
and guess_kwargs["Reader"] not in core.FAST_CLASSES.values()
):
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": guess_kwargs["Reader"].__class__,
"status": "Disabled: no fast version of reader available",
"dt": f"{0.0:.3f} ms",
}
)
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_kwargs has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (
core.InconsistentTableError,
ValueError,
TypeError,
AttributeError,
core.OptionalTableImportError,
core.ParameterError,
cparser.CParserError,
)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict requirements on column names
if "Reader" not in read_kwargs:
guess_kwargs["strict_names"] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"Reader": reader.__class__,
"status": "Success (guessing)",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(guess_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
"dt": f"{(time.time() - t0) * 1000:.3f} ms",
}
)
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"Reader": reader.__class__,
"status": (
"Success with original kwargs without strict_names (guessing)"
),
}
)
return dat
except guess_exception_classes as err:
_read_trace.append(
{
"kwargs": copy.deepcopy(read_kwargs),
"status": f"{err.__class__.__name__}: {str(err)}",
}
)
failed_kwargs.append(read_kwargs)
lines = [
"\nERROR: Unable to guess table format with the guesses listed below:"
]
for kwargs in failed_kwargs:
sorted_keys = sorted(
x for x in sorted(kwargs) if x not in ("Reader", "Outputter")
)
reader_repr = repr(kwargs.get("Reader", basic.Basic))
keys_vals = ["Reader:" + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f"{key}: {val!r}" for key, val in kwargs_sorted])
lines.append(" ".join(keys_vals))
msg = [
"",
"************************************************************************",
"** ERROR: Unable to guess table format with the guesses listed above. **",
"** **",
"** To figure out why the table did not read, use guess=False and **",
"** fast_reader=False, along with any appropriate arguments to read(). **",
"** In particular specify the format and any known attributes like the **",
"** delimiter. **",
"************************************************************************",
]
lines.extend(msg)
raise core.InconsistentTableError("\n".join(lines)) from None
def _get_guess_kwargs_list(read_kwargs):
"""Get the full list of reader keyword argument dicts.
These are the basis for the format guessing process.
The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop("guess_html", None):
guess_kwargs_list.append({"Reader": html.HTML})
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append({"Reader": ecsv.Ecsv})
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (
fixedwidth.FixedWidthTwoLine,
rst.RST,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastRdb,
basic.Rdb,
fastbasic.FastTab,
basic.Tab,
cds.Cds,
mrt.Mrt,
daophot.Daophot,
sextractor.SExtractor,
ipac.Ipac,
latex.Latex,
latex.AASTex,
):
guess_kwargs_list.append({"Reader": reader})
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (
fastbasic.FastCommentedHeader,
basic.CommentedHeader,
fastbasic.FastBasic,
basic.Basic,
fastbasic.FastNoHeader,
basic.NoHeader,
):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(
{"Reader": Reader, "delimiter": delimiter, "quotechar": quotechar}
)
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs["fast_reader"]
chunk_size = fast_reader.pop("chunk_size")
chunk_generator = fast_reader.pop("chunk_generator", False)
fast_reader["parallel"] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ("S", "U")
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta, copy=False)
return out
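# Hedged usage sketch of chunked reading (file name and chunk size are made up):
# the chunking path above is reached only via the fast reader options, e.g.
#
# >>> from astropy.io import ascii
# >>> tbl = ascii.read('huge_table.csv', format='csv',
# ...                  fast_reader={'chunk_size': 100 * 1000000})  # doctest: +SKIP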
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input file-like object, see #6460.
"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if isinstance(table, str) and ("\n" in table or "\r" in table):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, "read") and hasattr(table, "seek"):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs["fast_reader"]["return_header_chars"] = True
header = "" # Table header (up to start of data)
prev_chunk_chars = "" # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get("encoding")) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r"\S", chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == "\n":
break
else:
raise ValueError("no newline found in chunk (chunk_size too small?)")
# Prepend the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[: idx + 1]
prev_chunk_chars = chunk[idx + 1 :]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop("__ascii_fast_reader_header_chars__")
first_chunk = False
yield tbl
if final_chunk:
break
extra_writer_pars = (
"delimiter",
"comment",
"quotechar",
"formats",
"names",
"include_names",
"exclude_names",
"strip_whitespace",
)
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if "strip_whitespace" not in kwargs:
kwargs["strip_whitespace"] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell the user to
# simply remove the meta['comments'].
if isinstance(
writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader)
) and not isinstance(kwargs.get("comment", ""), str):
raise ValueError(
"for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing."
)
return writer
def write(
table,
output=None,
format=None,
Writer=None,
fast_writer=True,
*,
overwrite=False,
**kwargs,
):
# Docstring inserted below
_validate_read_write_kwargs(
"write", format=format, fast_writer=fast_writer, overwrite=overwrite, **kwargs
)
if isinstance(output, (str, bytes, os.PathLike)):
output = os.path.expanduser(output)
if not overwrite and os.path.lexists(output):
raise OSError(NOT_OVERWRITING_MSG.format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get("names")
if isinstance(table, Table):
# While we are only going to read data from columns, we may need to
# adjust info attributes such as format, so we make a shallow copy.
table = table.__class__(table, names=names, copy=False)
else:
# Otherwise, create a table from the input.
table = Table(table, names=names, copy=False)
table0 = table[:0].copy()
core._apply_include_exclude_names(
table0,
kwargs.get("names"),
kwargs.get("include_names"),
kwargs.get("exclude_names"),
)
diff_format_with_names = set(kwargs.get("formats", [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
"The key(s) {} specified in the formats argument do not match a column"
" name.".format(diff_format_with_names),
AstropyWarning,
)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, "Writer")
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, "write"):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, "w", newline="")
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
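# A hedged usage sketch of write() (output path and format are assumptions for
# the example):
#
# >>> from astropy.io import ascii
# >>> from astropy.table import Table
# >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
# >>> ascii.write(t, 'values.csv', format='csv', overwrite=True)  # doctest: +SKIP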
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dict
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
|
72e217f6391e67be836c4dc2247016ffeb46178954b40637cb97b7c985228828 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for serializing astropy objects to YAML.
It provides functions `~astropy.io.misc.yaml.dump`,
`~astropy.io.misc.yaml.load`, and `~astropy.io.misc.yaml.load_all` which
call the corresponding functions in `PyYaml <https://pyyaml.org>`_ but use the
`~astropy.io.misc.yaml.AstropyDumper` and `~astropy.io.misc.yaml.AstropyLoader`
classes to define custom YAML tags for the following astropy classes:
- `astropy.units.Unit`
- `astropy.units.Quantity`
- `astropy.time.Time`
- `astropy.time.TimeDelta`
- `astropy.coordinates.SkyCoord`
- `astropy.coordinates.Angle`
- `astropy.coordinates.Latitude`
- `astropy.coordinates.Longitude`
- `astropy.coordinates.EarthLocation`
- `astropy.table.SerializedColumn`
Examples
--------
>>> from astropy.io.misc import yaml
>>> import astropy.units as u
>>> from astropy.time import Time
>>> from astropy.coordinates import EarthLocation
>>> t = Time(2457389.0, format='mjd',
... location=EarthLocation(1000, 2000, 3000, unit=u.km))
>>> td = yaml.dump(t)
>>> print(td)
!astropy.time.Time
format: mjd
in_subfmt: '*'
jd1: 4857390.0
jd2: -0.5
location: !astropy.coordinates.earth.EarthLocation
ellipsoid: WGS84
x: !astropy.units.Quantity
unit: &id001 !astropy.units.Unit {unit: km}
value: 1000.0
y: !astropy.units.Quantity
unit: *id001
value: 2000.0
z: !astropy.units.Quantity
unit: *id001
value: 3000.0
out_subfmt: '*'
precision: 3
scale: utc
>>> ty = yaml.load(td)
>>> ty
<Time object: scale='utc' format='mjd' value=2457389.0>
>>> ty.location # doctest: +FLOAT_CMP
<EarthLocation (1000., 2000., 3000.) km>
"""
import base64
import numpy as np
import yaml
from astropy import coordinates as coords
from astropy import units as u
from astropy.table import SerializedColumn
from astropy.time import Time, TimeDelta
__all__ = ["AstropyLoader", "AstropyDumper", "load", "load_all", "dump"]
def _unit_representer(dumper, obj):
out = {"unit": str(obj.to_string())}
return dumper.represent_mapping("!astropy.units.Unit", out)
def _unit_constructor(loader, node):
map = loader.construct_mapping(node)
return u.Unit(map["unit"], parse_strict="warn")
def _serialized_column_representer(dumper, obj):
out = dumper.represent_mapping("!astropy.table.SerializedColumn", obj)
return out
def _serialized_column_constructor(loader, node):
map = loader.construct_mapping(node)
return SerializedColumn(map)
def _time_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.Time", out)
def _time_constructor(loader, node):
map = loader.construct_mapping(node)
out = Time.info._construct_from_dict(map)
return out
def _timedelta_representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping("!astropy.time.TimeDelta", out)
def _timedelta_constructor(loader, node):
map = loader.construct_mapping(node)
out = TimeDelta.info._construct_from_dict(map)
return out
def _ndarray_representer(dumper, obj):
if not (obj.flags["C_CONTIGUOUS"] or obj.flags["F_CONTIGUOUS"]):
obj = np.ascontiguousarray(obj)
if np.isfortran(obj):
obj = obj.T
order = "F"
else:
order = "C"
data_b64 = base64.b64encode(obj.tobytes())
out = {
"buffer": data_b64,
"dtype": str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
"shape": obj.shape,
"order": order,
}
return dumper.represent_mapping("!numpy.ndarray", out)
def _ndarray_constructor(loader, node):
# Convert mapping to a dict useful for initializing ndarray.
# Need deep=True since for structured dtype, the contents
# include lists and tuples, which need recursion via
# construct_sequence.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(**map)
def _void_representer(dumper, obj):
data_b64 = base64.b64encode(obj.tobytes())
out = {
"buffer": data_b64,
"dtype": str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr,
}
return dumper.represent_mapping("!numpy.void", out)
def _void_constructor(loader, node):
# Interpret the node as an array scalar and then index it to convert to np.void.
map = loader.construct_mapping(node, deep=True)
map["buffer"] = base64.b64decode(map["buffer"])
return np.ndarray(shape=(), **map)[()]
def _quantity_representer(tag):
def representer(dumper, obj):
out = obj.info._represent_as_dict()
return dumper.represent_mapping(tag, out)
return representer
def _quantity_constructor(cls):
def constructor(loader, node):
map = loader.construct_mapping(node)
return cls.info._construct_from_dict(map)
return constructor
def _skycoord_representer(dumper, obj):
map = obj.info._represent_as_dict()
out = dumper.represent_mapping("!astropy.coordinates.sky_coordinate.SkyCoord", map)
return out
def _skycoord_constructor(loader, node):
map = loader.construct_mapping(node)
out = coords.SkyCoord.info._construct_from_dict(map)
return out
# Straight from yaml's Representer
def _complex_representer(self, data):
if data.imag == 0.0:
data = f"{data.real!r}"
elif data.real == 0.0:
data = f"{data.imag!r}j"
elif data.imag > 0:
data = f"{data.real!r}+{data.imag!r}j"
else:
data = f"{data.real!r}{data.imag!r}j"
return self.represent_scalar("tag:yaml.org,2002:python/complex", data)
def _complex_constructor(loader, node):
map = loader.construct_scalar(node)
return complex(map)
class AstropyLoader(yaml.SafeLoader):
"""
Custom SafeLoader that constructs astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available constructor functions that are
called when parsing a YAML stream. See the `PyYaml documentation
<https://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the
class signature.
"""
def _construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def _construct_python_unicode(self, node):
return self.construct_scalar(node)
class AstropyDumper(yaml.SafeDumper):
"""
Custom SafeDumper that represents astropy core objects as well
as Python tuple and unicode objects.
This class is not directly instantiated by user code, but instead is
used to maintain the available representer functions that are
called when generating a YAML stream from an object. See the
`PyYaml documentation <https://pyyaml.org/wiki/PyYAMLDocumentation>`_
for details of the class signature.
"""
def _represent_tuple(self, data):
return self.represent_sequence("tag:yaml.org,2002:python/tuple", data)
AstropyDumper.add_multi_representer(u.UnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.FunctionUnitBase, _unit_representer)
AstropyDumper.add_multi_representer(u.StructuredUnit, _unit_representer)
AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple)
AstropyDumper.add_representer(np.ndarray, _ndarray_representer)
AstropyDumper.add_representer(np.void, _void_representer)
AstropyDumper.add_representer(Time, _time_representer)
AstropyDumper.add_representer(TimeDelta, _timedelta_representer)
AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer)
AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer)
# Numpy dtypes
AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool)
for np_type in [
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_int
)
for np_type in [np.float_, np.float16, np.float32, np.float64, np.longdouble]:
AstropyDumper.add_representer(
np_type, yaml.representer.SafeRepresenter.represent_float
)
for np_type in [np.complex_, complex, np.complex64, np.complex128]:
AstropyDumper.add_representer(np_type, _complex_representer)
AstropyLoader.add_constructor("tag:yaml.org,2002:python/complex", _complex_constructor)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/tuple", AstropyLoader._construct_python_tuple
)
AstropyLoader.add_constructor(
"tag:yaml.org,2002:python/unicode", AstropyLoader._construct_python_unicode
)
AstropyLoader.add_constructor("!astropy.units.Unit", _unit_constructor)
AstropyLoader.add_constructor("!numpy.ndarray", _ndarray_constructor)
AstropyLoader.add_constructor("!numpy.void", _void_constructor)
AstropyLoader.add_constructor("!astropy.time.Time", _time_constructor)
AstropyLoader.add_constructor("!astropy.time.TimeDelta", _timedelta_constructor)
AstropyLoader.add_constructor(
"!astropy.coordinates.sky_coordinate.SkyCoord", _skycoord_constructor
)
AstropyLoader.add_constructor(
"!astropy.table.SerializedColumn", _serialized_column_constructor
)
for cls, tag in (
(u.Quantity, "!astropy.units.Quantity"),
(u.Magnitude, "!astropy.units.Magnitude"),
(u.Dex, "!astropy.units.Dex"),
(u.Decibel, "!astropy.units.Decibel"),
(coords.Angle, "!astropy.coordinates.Angle"),
(coords.Latitude, "!astropy.coordinates.Latitude"),
(coords.Longitude, "!astropy.coordinates.Longitude"),
(coords.EarthLocation, "!astropy.coordinates.earth.EarthLocation"),
):
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
for cls in list(coords.representation.REPRESENTATION_CLASSES.values()) + list(
coords.representation.DIFFERENTIAL_CLASSES.values()
):
name = cls.__name__
# Add representations/differentials defined in astropy.
if name in coords.representation.__all__:
tag = "!astropy.coordinates." + name
AstropyDumper.add_multi_representer(cls, _quantity_representer(tag))
AstropyLoader.add_constructor(tag, _quantity_constructor(cls))
def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader)
def load_all(stream):
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load_all(stream, Loader=AstropyLoader)
def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
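    Examples
    --------
    A minimal sketch showing both call styles (``coords.yaml`` is a
    hypothetical output path)::
        from astropy.coordinates import SkyCoord
        text = dump(SkyCoord(10, 20, unit="deg"))    # returns the YAML as a str
        with open("coords.yaml", "w") as fh:
            dump(SkyCoord(10, 20, unit="deg"), fh)   # writes to the stream, returns None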
"""
kwargs["Dumper"] = AstropyDumper
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream=stream, **kwargs)
|
bdbcc8173d43a253045d8b8e17c79412fa577886b93a2ae8b1dced3a7251d58c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Mixin columns for use in ascii/tests/test_ecsv.py, fits/tests/test_connect.py,
and misc/tests/test_hdf5.py.
All columns should have length 2.
"""
import numpy as np
from astropy import coordinates, table, time
from astropy import units as u
el = coordinates.EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = coordinates.SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 1 * u.kpc)
cr = coordinates.CartesianRepresentation([0, 1] * u.pc, [4, 5] * u.pc, [8, 6] * u.pc)
sd = coordinates.SphericalCosLatDifferential(
[0, 1] * u.mas / u.yr, [0, 1] * u.mas / u.yr, 10 * u.km / u.s
)
srd = coordinates.SphericalRepresentation(sr, differentials=sd)
sc = coordinates.SkyCoord(
[1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5"
)
scd = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,m", frame="fk4", obstime=["J1990.5"] * 2
)
scdc = scd.copy()
scdc.representation_type = "cartesian"
scpm = coordinates.SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
)
scpmrv = coordinates.SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
radial_velocity=[11, 12] * u.km / u.s,
)
scrv = coordinates.SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,pc", radial_velocity=[11, 12] * u.km / u.s
)
tm = time.Time(
[51000.5, 51001.5], format="mjd", scale="tai", precision=5, location=el[0]
)
tm2 = time.Time(tm, precision=3, format="iso")
tm3 = time.Time(tm, location=el)
tm3.info.serialize_method["ecsv"] = "jd1_jd2"
obj = table.Column([{"a": 1}, {"b": [2]}], dtype="object")
su = table.Column(
[(1, (1.5, 1.6)), (2, (2.5, 2.6))],
name="su",
dtype=[("i", np.int64), ("f", [("p1", np.float64), ("p0", np.float64)])],
)
su2 = table.Column(
[(["snake", "c"], [1.6, 1.5]), (["eal", "a"], [2.5, 2.6])],
dtype=[("name", "U5", (2,)), ("f", "f8", (2,))],
)
stokes = coordinates.StokesCoord(["RR", "LL"])
# NOTE: for testing, the name of the column "x" for the
# Quantity is important since it tests the fix for #10215
# (namespace clash, where "x" clashes with "el.x").
mixin_cols = {
"tm": tm,
"tm2": tm2,
"tm3": tm3,
"dt": time.TimeDelta([1, 2] * u.day),
"sc": sc,
"scd": scd,
"scdc": scdc,
"scpm": scpm,
"scpmrv": scpmrv,
"scrv": scrv,
"x": [1, 2] * u.m,
"qdb": [10, 20] * u.dB(u.mW),
"qdex": [4.5, 5.5] * u.dex(u.cm / u.s**2),
"qmag": [21, 22] * u.ABmag,
"lat": coordinates.Latitude([1, 2] * u.deg),
"lon": coordinates.Longitude([1, 2] * u.deg, wrap_angle=180.0 * u.deg),
"ang": coordinates.Angle([1, 2] * u.deg),
"el": el,
"sr": sr,
"cr": cr,
"sd": sd,
"srd": srd,
"nd": table.NdarrayMixin([1, 2]),
"obj": obj,
"su": su,
"su2": su2,
"stokes": stokes,
}
time_attrs = [
"value",
"shape",
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
]
compare_attrs = {
"tm": time_attrs,
"tm2": time_attrs,
"tm3": time_attrs,
"dt": ["shape", "value", "format", "scale"],
"sc": ["ra", "dec", "representation_type", "frame.name"],
"scd": ["ra", "dec", "distance", "representation_type", "frame.name"],
"scdc": ["x", "y", "z", "representation_type", "frame.name"],
"scpm": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"representation_type",
"frame.name",
],
"scpmrv": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"radial_velocity",
"representation_type",
"frame.name",
],
"scrv": [
"ra",
"dec",
"distance",
"radial_velocity",
"representation_type",
"frame.name",
],
"x": ["value", "unit"],
"qdb": ["value", "unit"],
"qdex": ["value", "unit"],
"qmag": ["value", "unit"],
"lon": ["value", "unit", "wrap_angle"],
"lat": ["value", "unit"],
"ang": ["value", "unit"],
"el": ["x", "y", "z", "ellipsoid"],
"nd": ["data"],
"sr": ["lon", "lat", "distance"],
"cr": ["x", "y", "z"],
"sd": ["d_lon_coslat", "d_lat", "d_distance"],
"srd": [
"lon",
"lat",
"distance",
"differentials.s.d_lon_coslat",
"differentials.s.d_lat",
"differentials.s.d_distance",
],
"obj": [],
"su": ["i", "f.p0", "f.p1"],
"su2": ["name", "f"],
"stokes": ["value"],
}
non_trivial_names = {
"cr": ["cr.x", "cr.y", "cr.z"],
"dt": ["dt.jd1", "dt.jd2"],
"el": ["el.x", "el.y", "el.z"],
"sc": ["sc.ra", "sc.dec"],
"scd": ["scd.ra", "scd.dec", "scd.distance", "scd.obstime.jd1", "scd.obstime.jd2"],
"scdc": ["scdc.x", "scdc.y", "scdc.z", "scdc.obstime.jd1", "scdc.obstime.jd2"],
"scfc": ["scdc.x", "scdc.y", "scdc.z", "scdc.obstime.jd1", "scdc.obstime.jd2"],
"scpm": [
"scpm.ra",
"scpm.dec",
"scpm.distance",
"scpm.pm_ra_cosdec",
"scpm.pm_dec",
],
"scpmrv": [
"scpmrv.ra",
"scpmrv.dec",
"scpmrv.distance",
"scpmrv.pm_ra_cosdec",
"scpmrv.pm_dec",
"scpmrv.radial_velocity",
],
"scrv": ["scrv.ra", "scrv.dec", "scrv.distance", "scrv.radial_velocity"],
"sd": ["sd.d_lon_coslat", "sd.d_lat", "sd.d_distance"],
"sr": ["sr.lon", "sr.lat", "sr.distance"],
"srd": [
"srd.lon",
"srd.lat",
"srd.distance",
"srd.differentials.s.d_lon_coslat",
"srd.differentials.s.d_lat",
"srd.differentials.s.d_distance",
],
"su": ["su.i", "su.f.p1", "su.f.p0"],
"su2": ["su2.name", "su2.f"],
"tm": ["tm.jd1", "tm.jd2"],
"tm2": ["tm2.jd1", "tm2.jd2"],
"tm3": ["tm3.jd1", "tm3.jd2", "tm3.location.x", "tm3.location.y", "tm3.location.z"],
}
serialized_names = {
name: non_trivial_names.get(name, [name]) for name in sorted(mixin_cols)
}
|
4b503534f9bfc4fda9b649b36361e91b2dc50c8953df71bb41041810c072568c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import OrderedDict
from .base import IORegistryError, _UnifiedIORegistryBase
__all__ = ["UnifiedIORegistry", "UnifiedInputRegistry", "UnifiedOutputRegistry"]
PATH_TYPES = (str, os.PathLike) # TODO! include bytes
def _expand_user_in_args(args):
# Conservatively attempt to apply `os.path.expanduser` to the first
# argument, which can be either a path or the contents of a table.
if len(args) and isinstance(args[0], PATH_TYPES):
ex_user = os.path.expanduser(args[0])
if ex_user != args[0] and os.path.exists(os.path.dirname(ex_user)):
args = (ex_user,) + args[1:]
return args
# -----------------------------------------------------------------------------
class UnifiedInputRegistry(_UnifiedIORegistryBase):
"""Read-only Unified Registry.
.. versionadded:: 5.0
Examples
--------
First let's start by creating a read-only registry.
.. code-block:: python
>>> from astropy.io.registry import UnifiedInputRegistry
>>> read_reg = UnifiedInputRegistry()
There is nothing in this registry. Let's make a reader for the
:class:`~astropy.table.Table` class::
from astropy.table import Table
def my_table_reader(filename, some_option=1):
# Read in the table by any means necessary
return table # should be an instance of Table
Such a function can then be registered with the I/O registry::
read_reg.register_reader('my-table-format', Table, my_table_reader)
Note that we CANNOT then read in a table with::
d = Table.read('my_table_file.mtf', format='my-table-format')
    Why? Because ``Table.read`` uses Astropy's default global registry and this
    is a separate registry.
    Instead, we can read using the ``read`` method on this registry::
d = read_reg.read(Table, 'my_table_file.mtf', format='my-table-format')
"""
def __init__(self):
super().__init__() # set _identifiers
self._readers = OrderedDict()
self._registries["read"] = {"attr": "_readers", "column": "Read"}
self._registries_order = ("read", "identify")
# =========================================================================
# Read methods
def register_reader(
self, data_format, data_class, function, force=False, priority=0
):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : class
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the reader, used to compare possible formats when
trying to determine the best reader to use. Higher priorities are
preferred over lower priorities, with the default priority being 0
(negative numbers are allowed though).
"""
if (data_format, data_class) not in self._readers or force:
self._readers[(data_format, data_class)] = function, priority
else:
raise IORegistryError(
f"Reader for format '{data_format}' and class '{data_class.__name__}'"
" is already defined"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "read")
def unregister_reader(self, data_format, data_class):
"""
Unregister a reader function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that the reader produces.
"""
if (data_format, data_class) in self._readers:
self._readers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No reader defined for format '{data_format}' and class"
f" '{data_class.__name__}'"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "read")
def get_reader(self, data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
            The class of the object that the reader produces.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in self._readers if fmt == data_format]
for reader_format, reader_class in readers:
if self._is_best_match(data_class, reader_class, readers):
return self._readers[(reader_format, reader_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, "Read")
raise IORegistryError(
f"No reader defined for format '{data_format}' and class"
f" '{data_class.__name__}'.\n\nThe available formats"
f" are:\n\n{format_table_str}"
)
def read(self, cls, *args, format=None, cache=False, **kwargs):
"""
Read in data.
Parameters
----------
cls : class
*args
The arguments passed to this method depend on the format.
format : str or None
cache : bool
Whether to cache the results of reading in the data.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered reader.
"""
ctx = None
try:
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(
args[0], encoding="binary", cache=cache
)
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], "read"):
path = None
fileobj = args[0]
format = self._get_valid_format(
"read", cls, path, fileobj, args, kwargs
)
reader = self.get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError(
f"could not convert reader output to {cls.__name__} class."
)
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
# -----------------------------------------------------------------------------
class UnifiedOutputRegistry(_UnifiedIORegistryBase):
"""Write-only Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._writers = OrderedDict()
self._registries["write"] = {"attr": "_writers", "column": "Write"}
self._registries_order = ("write", "identify")
# =========================================================================
# Write Methods
def register_writer(
self, data_format, data_class, function, force=False, priority=0
):
"""
        Register a writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : class
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the writer, used to compare possible formats when trying
to determine the best writer to use. Higher priorities are preferred
over lower priorities, with the default priority being 0 (negative
numbers are allowed though).
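        Examples
        --------
        A minimal sketch (``write_reg`` is a write-capable registry instance and
        ``my_table_writer`` is a hypothetical function)::
            from astropy.table import Table
            def my_table_writer(table, filename):
                ...  # serialize ``table`` to ``filename`` by any means necessary
            write_reg.register_writer('my-table-format', Table, my_table_writer)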
"""
        if (data_format, data_class) not in self._writers or force:
self._writers[(data_format, data_class)] = function, priority
else:
raise IORegistryError(
f"Writer for format '{data_format}' and class '{data_class.__name__}'"
" is already defined"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "write")
def unregister_writer(self, data_format, data_class):
"""
Unregister a writer function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be written.
"""
if (data_format, data_class) in self._writers:
self._writers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No writer defined for format '{data_format}' and class"
f" '{data_class.__name__}'"
)
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, "write")
def get_writer(self, data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in self._writers if fmt == data_format]
for writer_format, writer_class in writers:
if self._is_best_match(data_class, writer_class, writers):
return self._writers[(writer_format, writer_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, "Write")
raise IORegistryError(
f"No writer defined for format '{data_format}' and class"
f" '{data_class.__name__}'.\n\nThe available formats"
f" are:\n\n{format_table_str}"
)
def write(self, data, *args, format=None, **kwargs):
"""
Write out data.
Parameters
----------
data : object
The data to write.
*args
The arguments passed to this method depend on the format.
format : str or None
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered writer. Most often `None`.
.. versionadded:: 4.3
"""
# Expand a tilde-prefixed path if present in args[0]
args = _expand_user_in_args(args)
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], "read"):
path = None
fileobj = args[0]
format = self._get_valid_format(
"write", data.__class__, path, fileobj, args, kwargs
)
writer = self.get_writer(format, data.__class__)
return writer(data, *args, **kwargs)
# -----------------------------------------------------------------------------
class UnifiedIORegistry(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
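    Examples
    --------
    A minimal sketch of a private registry wired up for a custom class
    (``MyData``, ``my_reader`` and ``my_writer`` are hypothetical, and the
    file names are placeholders)::
        from astropy.io.registry import UnifiedIORegistry
        registry = UnifiedIORegistry()
        registry.register_reader("my-format", MyData, my_reader)
        registry.register_writer("my-format", MyData, my_writer)
        data = registry.read(MyData, "data.myfmt", format="my-format")
        registry.write(data, "data_copy.myfmt", format="my-format")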
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
|
8148badf663949a00da2230e067deaee90daacf9f3bd14eba8f0177907121dd0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import re
import warnings
from collections import OrderedDict
from operator import itemgetter
import numpy as np
__all__ = ["IORegistryError"]
class IORegistryError(Exception):
"""Custom error for registry clashes."""
pass
# -----------------------------------------------------------------------------
class _UnifiedIORegistryBase:
"""Base class for registries in Astropy's Unified IO.
This base class provides identification functions and miscellaneous
    utilities. For an example of how to build a registry subclass we suggest
    :class:`~astropy.io.registry.UnifiedInputRegistry`, which enables
    read-only registries. These higher-level subclasses will probably serve
    better as a base class, for instance
:class:`~astropy.io.registry.UnifiedIORegistry` subclasses both
:class:`~astropy.io.registry.UnifiedInputRegistry` and
:class:`~astropy.io.registry.UnifiedOutputRegistry` to enable both
reading from and writing to files.
.. versionadded:: 5.0
"""
def __init__(self):
# registry of identifier functions
self._identifiers = OrderedDict()
# what this class can do: e.g. 'read' &/or 'write'
self._registries = {}
self._registries["identify"] = {
"attr": "_identifiers",
"column": "Auto-identify",
}
self._registries_order = ("identify",) # match keys in `_registries`
# If multiple formats are added to one class the update of the docs is quite
# expensive. Classes for which the doc update is temporarily delayed are added
# to this set.
self._delayed_docs_classes = set()
@property
def available_registries(self):
"""Available registries.
Returns
-------
``dict_keys``
"""
return self._registries.keys()
def get_formats(self, data_class=None, filter_on=None):
"""
Get the list of registered formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class or None, optional
Filter readers/writer to match data class (default = all classes).
filter_on : str or None, optional
Which registry to show. E.g. "identify"
If None search for both. Default is None.
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
Raises
------
ValueError
If ``filter_on`` is not None nor a registry name.
"""
from astropy.table import Table
# set up the column names
colnames = (
"Data class",
"Format",
*[self._registries[k]["column"] for k in self._registries_order],
"Deprecated",
)
i_dataclass = colnames.index("Data class")
i_format = colnames.index("Format")
i_regstart = colnames.index(
self._registries[self._registries_order[0]]["column"]
)
i_deprecated = colnames.index("Deprecated")
# registries
regs = set()
for k in self._registries.keys() - {"identify"}:
regs |= set(getattr(self, self._registries[k]["attr"]))
format_classes = sorted(regs, key=itemgetter(0))
# the format classes from all registries except "identify"
rows = []
for fmt, cls in format_classes:
# see if can skip, else need to document in row
if data_class is not None and not self._is_best_match(
data_class, cls, format_classes
):
continue
# flags for each registry
has_ = {
k: "Yes" if (fmt, cls) in getattr(self, v["attr"]) else "No"
for k, v in self._registries.items()
}
# Check if this is a short name (e.g. 'rdb') which is deprecated in
# favor of the full 'ascii.rdb'.
ascii_format_class = ("ascii." + fmt, cls)
# deprecation flag
deprecated = "Yes" if ascii_format_class in format_classes else ""
# add to rows
rows.append(
(
cls.__name__,
fmt,
*[has_[n] for n in self._registries_order],
deprecated,
)
)
        # filter_on can be in self._registries_order or None
if str(filter_on).lower() in self._registries_order:
index = self._registries_order.index(str(filter_on).lower())
rows = [row for row in rows if row[i_regstart + index] == "Yes"]
elif filter_on is not None:
raise ValueError(
                f'unrecognized value for "filter_on": {filter_on}.\n'
f"Allowed are {self._registries_order} and None."
)
# Sorting the list of tuples is much faster than sorting it after the
# table is created. (#5262)
if rows:
# Indices represent "Data Class", "Deprecated" and "Format".
data = list(
zip(*sorted(rows, key=itemgetter(i_dataclass, i_deprecated, i_format)))
)
else:
data = None
# make table
# need to filter elementwise comparison failure issue
# https://github.com/numpy/numpy/issues/6784
with warnings.catch_warnings():
warnings.simplefilter(action="ignore", category=FutureWarning)
format_table = Table(data, names=colnames)
if not np.any(format_table["Deprecated"].data == "Yes"):
format_table.remove_column("Deprecated")
return format_table
@contextlib.contextmanager
def delay_doc_updates(self, cls):
"""Contextmanager to disable documentation updates when registering
reader and writer. The documentation is only built once when the
contextmanager exits.
.. versionadded:: 1.3
Parameters
----------
cls : class
Class for which the documentation updates should be delayed.
Notes
-----
        Registering multiple readers and writers can cause significant overhead
        because the documentation of the corresponding ``read`` and ``write``
        methods is rebuilt every time.
Examples
--------
        See, for example, the source code of ``astropy.table.__init__``.
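        A hedged sketch (``registry`` is a registry instance, ``Table`` is
        `astropy.table.Table`, and the reader functions are hypothetical)::
            with registry.delay_doc_updates(Table):
                registry.register_reader("fmt1", Table, read_fmt1)
                registry.register_reader("fmt2", Table, read_fmt2)
            # The ``Table.read`` documentation is rebuilt once, when the block exits.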
"""
self._delayed_docs_classes.add(cls)
yield
self._delayed_docs_classes.discard(cls)
for method in self._registries.keys() - {"identify"}:
self._update__doc__(cls, method)
# =========================================================================
# Identifier methods
def register_identifier(self, data_format, data_class, identifier, force=False):
"""
Associate an identifier function with a specific data type.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
identifier : function
A function that checks the argument specified to `read` or `write` to
determine whether the input can be interpreted as a table of type
``data_format``. This function should take the following arguments:
- ``origin``: A string ``"read"`` or ``"write"`` identifying whether
the file is to be opened for reading or writing.
- ``path``: The path to the file.
- ``fileobj``: An open file object to read the file's contents, or
`None` if the file could not be opened.
- ``*args``: Positional arguments for the `read` or `write`
function.
- ``**kwargs``: Keyword arguments for the `read` or `write`
function.
One or both of ``path`` or ``fileobj`` may be `None`. If they are
both `None`, the identifier will need to work from ``args[0]``.
The function should return True if the input can be identified
as being of format ``data_format``, and False otherwise.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
Examples
--------
To set the identifier based on extensions, for formats that take a
filename as a first argument, you can do for example
.. code-block:: python
from astropy.io.registry import register_identifier
from astropy.table import Table
def my_identifier(*args, **kwargs):
return isinstance(args[0], str) and args[0].endswith('.tbl')
register_identifier('ipac', Table, my_identifier)
unregister_identifier('ipac', Table)
"""
        if (data_format, data_class) not in self._identifiers or force:
self._identifiers[(data_format, data_class)] = identifier
else:
raise IORegistryError(
f"Identifier for format {data_format!r} and class"
f" {data_class.__name__!r} is already defined"
)
def unregister_identifier(self, data_format, data_class):
"""
Unregister an identifier function.
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be read/written.
"""
if (data_format, data_class) in self._identifiers:
self._identifiers.pop((data_format, data_class))
else:
raise IORegistryError(
f"No identifier defined for format {data_format!r} and class"
f" {data_class.__name__!r}"
)
def identify_format(self, origin, data_class_required, path, fileobj, args, kwargs):
"""Loop through identifiers to see which formats match.
Parameters
----------
origin : str
            A string ``"read"`` or ``"write"`` identifying whether the file is to be
opened for reading or writing.
data_class_required : object
The specified class for the result of `read` or the class that is to be
written.
path : str or path-like or None
The path to the file or None.
fileobj : file-like or None.
An open file object to read the file's contents, or ``None`` if the
file could not be opened.
args : sequence
Positional arguments for the `read` or `write` function. Note that
            these must be provided as a sequence.
kwargs : dict-like
Keyword arguments for the `read` or `write` function. Note that this
parameter must be `dict`-like.
Returns
-------
valid_formats : list
List of matching formats.
"""
valid_formats = []
for data_format, data_class in self._identifiers:
if self._is_best_match(data_class_required, data_class, self._identifiers):
if self._identifiers[(data_format, data_class)](
origin, path, fileobj, *args, **kwargs
):
valid_formats.append(data_format)
return valid_formats
# =========================================================================
# Utils
def _get_format_table_str(self, data_class, filter_on):
"""``get_formats()``, without column "Data class", as a str."""
format_table = self.get_formats(data_class, filter_on)
format_table.remove_column("Data class")
format_table_str = "\n".join(format_table.pformat(max_lines=-1))
return format_table_str
def _is_best_match(self, class1, class2, format_classes):
"""Determine if class2 is the "best" match for class1 in the list of classes.
It is assumed that (class2 in classes) is True.
        class2 is the best match if:
- ``class1`` is a subclass of ``class2`` AND
- ``class2`` is the nearest ancestor of ``class1`` that is in classes
(which includes the case that ``class1 is class2``)
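        For example (a sketch; ``Table``, ``QTable`` and ``Column`` are from
        `astropy.table`, and the registrations are hypothetical)::
            classes = [("fmt", Table), ("fmt", Column)]
            self._is_best_match(QTable, Table, classes)   # True: nearest registered ancestor
            self._is_best_match(QTable, Column, classes)  # False: Column is not an ancestor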
"""
if issubclass(class1, class2):
classes = {cls for fmt, cls in format_classes}
for parent in class1.__mro__:
if parent is class2: # class2 is closest registered ancestor
return True
if parent in classes: # class2 was superseded
return False
return False
def _get_valid_format(self, mode, cls, path, fileobj, args, kwargs):
"""
Returns the first valid format that can be used to read/write the data in
question. Mode can be either 'read' or 'write'.
"""
valid_formats = self.identify_format(mode, cls, path, fileobj, args, kwargs)
if len(valid_formats) == 0:
format_table_str = self._get_format_table_str(cls, mode.capitalize())
raise IORegistryError(
"Format could not be identified based on the"
" file name or contents, please provide a"
" 'format' argument.\n"
f"The available formats are:\n{format_table_str}"
)
elif len(valid_formats) > 1:
return self._get_highest_priority_format(mode, cls, valid_formats)
return valid_formats[0]
def _get_highest_priority_format(self, mode, cls, valid_formats):
"""
Returns the reader or writer with the highest priority. If it is a tie,
error.
"""
if mode == "read":
format_dict = self._readers
mode_loader = "reader"
elif mode == "write":
format_dict = self._writers
mode_loader = "writer"
best_formats = []
current_priority = -np.inf
for format in valid_formats:
try:
_, priority = format_dict[(format, cls)]
except KeyError:
# We could throw an exception here, but get_reader/get_writer handle
# this case better, instead maximally deprioritise the format.
priority = -np.inf
if priority == current_priority:
best_formats.append(format)
elif priority > current_priority:
best_formats = [format]
current_priority = priority
if len(best_formats) > 1:
raise IORegistryError(
"Format is ambiguous - options are:"
f" {', '.join(sorted(valid_formats, key=itemgetter(0)))}"
)
return best_formats[0]
def _update__doc__(self, data_class, readwrite):
"""
Update the docstring to include all the available readers / writers for
the ``data_class.read``/``data_class.write`` functions (respectively).
Don't update if the data_class does not have the relevant method.
"""
# abort if method "readwrite" isn't on data_class
if not hasattr(data_class, readwrite):
return
from .interface import UnifiedReadWrite
FORMATS_TEXT = "The available built-in formats are:"
# Get the existing read or write method and its docstring
class_readwrite_func = getattr(data_class, readwrite)
if not isinstance(class_readwrite_func.__doc__, str):
# No docstring--could just be test code, or possibly code compiled
# without docstrings
return
lines = class_readwrite_func.__doc__.splitlines()
# Find the location of the existing formats table if it exists
sep_indices = [ii for ii, line in enumerate(lines) if FORMATS_TEXT in line]
if sep_indices:
# Chop off the existing formats table, including the initial blank line
chop_index = sep_indices[0]
lines = lines[:chop_index]
# Find the minimum indent, skipping the first line because it might be odd
matches = [re.search(r"(\S)", line) for line in lines[1:]]
left_indent = " " * min(match.start() for match in matches if match)
# Get the available unified I/O formats for this class
# Include only formats that have a reader, and drop the 'Data class' column
format_table = self.get_formats(data_class, readwrite.capitalize())
format_table.remove_column("Data class")
# Get the available formats as a table, then munge the output of pformat()
# a bit and put it into the docstring.
new_lines = format_table.pformat(max_lines=-1, max_width=80)
table_rst_sep = re.sub("-", "=", new_lines[1])
new_lines[1] = table_rst_sep
new_lines.insert(0, table_rst_sep)
new_lines.append(table_rst_sep)
# Check for deprecated names and include a warning at the end.
if "Deprecated" in format_table.colnames:
new_lines.extend(
[
"",
"Deprecated format names like ``aastex`` will be "
"removed in a future version. Use the full ",
"name (e.g. ``ascii.aastex``) instead.",
]
)
new_lines = [FORMATS_TEXT, ""] + new_lines
lines.extend([left_indent + line for line in new_lines])
# Depending on Python version and whether class_readwrite_func is
# an instancemethod or classmethod, one of the following will work.
if isinstance(class_readwrite_func, UnifiedReadWrite):
class_readwrite_func.__class__.__doc__ = "\n".join(lines)
else:
try:
class_readwrite_func.__doc__ = "\n".join(lines)
except AttributeError:
class_readwrite_func.__func__.__doc__ = "\n".join(lines)
|
8d4a6dd1b36a646411b0c66b702094744168fca643517d9a0f1b66360553986f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.io import registry as io_registry
from astropy.table import Table
from astropy.table.column import BaseColumn
from astropy.units import Quantity
from astropy.utils.misc import NOT_OVERWRITING_MSG
from . import from_table, parse
from .tree import Table as VOTable
from .tree import VOTableFile
def is_votable(origin, filepath, fileobj, *args, **kwargs):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
    origin : str
        A string ``"read"`` or ``"write"`` identifying whether the file is
        being opened for reading or writing.
    filepath : str or None
        The path to a file containing a VOTABLE_ xml document, or `None`.
    fileobj : file-like or None
        An open file object to read the file's contents, or `None` if the
        file could not be opened.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
from . import is_votable
if origin == "read":
if fileobj is not None:
try:
result = is_votable(fileobj)
finally:
fileobj.seek(0)
return result
elif filepath is not None:
return is_votable(filepath)
return isinstance(args[0], (VOTableFile, VOTable))
else:
return False
def read_table_votable(
input, table_id=None, use_names_over_ids=False, verify=None, **kwargs
):
"""
    Read a Table object from a VO table file.
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.Table`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.Table` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
**kwargs
Additional keyword arguments are passed on to
:func:`astropy.io.votable.table.parse`.
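    Examples
    --------
    A minimal usage sketch through the unified interface (``table.xml`` is a
    hypothetical VOTable file)::
        from astropy.table import Table
        t = Table.read("table.xml", format="votable")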
"""
if not isinstance(input, (VOTableFile, VOTable)):
input = parse(input, table_id=table_id, verify=verify, **kwargs)
# Parse all table objects
table_id_mapping = {}
tables = []
if isinstance(input, VOTableFile):
for table in input.iter_tables():
if table.ID is not None:
table_id_mapping[table.ID] = table
tables.append(table)
if len(tables) > 1:
if table_id is None:
raise ValueError(
"Multiple tables found: table id should be set via the table_id="
" argument. The available tables are"
f" {', '.join(table_id_mapping)}, or integers less than"
f" {len(tables)}."
)
elif isinstance(table_id, str):
if table_id in table_id_mapping:
table = table_id_mapping[table_id]
else:
raise ValueError(f"No tables with id={table_id} found")
elif isinstance(table_id, int):
if table_id < len(tables):
table = tables[table_id]
else:
raise IndexError(
f"Table index {table_id} is out of range. {len(tables)} tables"
" found"
)
elif len(tables) == 1:
table = tables[0]
else:
raise ValueError("No table found")
elif isinstance(input, VOTable):
table = input
# Convert to an astropy.table.Table object
return table.to_table(use_names_over_ids=use_names_over_ids)
def write_table_votable(
input, output, table_id=None, overwrite=False, tabledata_format=None
):
"""
    Write a Table object to a VO table file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`astropy:votable-serialization`.
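    Examples
    --------
    A minimal usage sketch through the unified interface (``out.xml`` is a
    hypothetical output path)::
        from astropy.table import Table
        t = Table({"a": [1.0, 2.0]})
        t.write("out.xml", format="votable", overwrite=True)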
"""
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names} to VOTable"
)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Create a new VOTable file
table_file = from_table(input, table_id=table_id)
# Write out file
table_file.to_xml(output, tabledata_format=tabledata_format)
io_registry.register_reader("votable", Table, read_table_votable)
io_registry.register_writer("votable", Table, write_table_votable)
io_registry.register_identifier("votable", Table, is_votable)
|
9a21878eceb02cca5be8d72d6bc6a537777fabd173da8067baed861ab25a8723 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import base64
import codecs
import gzip
import io
import re
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy import __version__ as astropy_version
from astropy.io import fits
from astropy.utils.collections import HomogeneousList
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.xml.writer import XMLWriter
from . import converters
from . import ucd as ucd_mod
from . import util, xmlutil
from .exceptions import (
E06,
E08,
E09,
E10,
E11,
E12,
E13,
E15,
E16,
E17,
E18,
E19,
E20,
E21,
E22,
E23,
E25,
W06,
W07,
W08,
W09,
W10,
W11,
W12,
W13,
W15,
W17,
W18,
W19,
W20,
W21,
W22,
W26,
W27,
W28,
W29,
W32,
W33,
W35,
W36,
W37,
W38,
W40,
W41,
W42,
W43,
W44,
W45,
W50,
W52,
W53,
W54,
vo_raise,
vo_reraise,
vo_warn,
warn_or_raise,
warn_unknown_attrs,
)
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
"Link",
"Info",
"Values",
"Field",
"Param",
"CooSys",
"TimeSys",
"FieldRef",
"ParamRef",
"Group",
"Table",
"Resource",
"VOTableFile",
"Element",
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
    Masked arrays cannot be resized in place, and `np.resize` and
    `ma.resize` are both incompatible with structured arrays.
    This helper therefore copies the data into a new masked array of the
    requested size.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name
)
)
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
Given an key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name
)
)
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config["version_1_4_or_later"]:
return "vounit"
else:
return "cds"
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get("unit_format") is None:
format = _get_default_unit_format(config)
else:
format = config["unit_format"]
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if year is not None and re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None:
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
string : str
        The string to check
    attr_name : str
        The name of the attribute the string was found in (used for error
        message)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get("version_1_1_or_later"):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get("version_1_2_or_later", False),
has_colon=config.get("version_1_2_or_later", False),
)
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get("verify", "ignore") == "exception":
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get("verify", "ignore") == "warn":
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28, W28, ("xtype", self._element_name, "1.2"), self._config, self._pos
)
check_string(xtype, "xtype", self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (
self._utype_in_v1_2
and utype is not None
and not self._config.get("version_1_2_or_later")
):
warn_or_raise(
W28, W28, ("utype", self._element_name, "1.2"), self._config, self._pos
)
check_string(utype, "utype", self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == "":
ucd = None
if ucd is not None:
if self._ucd_in_v1_2 and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28,
W28,
("ucd", self._element_name, "1.2"),
self._config,
self._pos,
)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ""
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get("version_1_1_or_later"):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(
self._element_name,
self._content,
attrib=w.object_attrs(self, self._attr_list),
)
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = [
"ID",
"content_role",
"content_type",
"title",
"value",
"href",
"action",
]
_element_name = "LINK"
def __init__(
self,
ID=None,
title=None,
value=None,
href=None,
action=None,
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get("content-role") or kwargs.get("content_role")
content_type = kwargs.get("content-type") or kwargs.get("content_type")
if "gref" in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
"LINK",
kwargs.keys(),
config,
pos,
["content-role", "content_role", "content-type", "content_type", "gref"],
)
@property
def content_role(self):
"""Defines the MIME role of the referenced object.
Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if (
content_role == "type" and not self._config["version_1_3_or_later"]
        ) or content_role not in (None, "query", "hints", "doc", "location", "type"):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault("links", [])
column.meta["links"].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty, _UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = "INFO"
_attr_list_11 = ["ID", "name", "value"]
_attr_list_12 = _attr_list_11 + ["xtype", "ref", "unit", "ucd", "utype"]
_utype_in_v1_2 = True
def __init__(
self,
ID=None,
name=None,
value=None,
id=None,
xtype=None,
ref=None,
unit=None,
ucd=None,
utype=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs("INFO", ["xtype"], config, pos)
if ref is not None:
warn_unknown_attrs("INFO", ["ref"], config, pos)
if unit is not None:
warn_unknown_attrs("INFO", ["unit"], config, pos)
if ucd is not None:
warn_unknown_attrs("INFO", ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs("INFO", ["utype"], config, pos)
warn_unknown_attrs("INFO", extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, "name", self._config, self._pos)
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, "value", self._config, self._pos)
check_string(value, "value", self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("ref", "INFO", "1.2"), self._config, self._pos)
xmlutil.check_id(ref, "ref", self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get("version_1_2_or_later"):
warn_or_raise(W28, W28, ("unit", "INFO", "1.2"), self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
w.element(self._element_name, self._content, attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
votable,
field,
ID=None,
null=None,
ref=None,
type="legal",
id=None,
config=None,
pos=None,
**extras,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs("VALUES", extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos
)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
"0", self._config, self._pos
)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""Defines the applicability of the domain defined by this VALUES_ element [*required*].
Must be one of the following strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("legal", "actual"):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("VALUES", self.ref), self._config, self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, "converter") and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == "yes":
self._min_inclusive = True
elif inclusive == "no":
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, "converter") and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == "yes":
self._max_inclusive = True
elif inclusive == "no":
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != "VALUES":
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == "MIN":
if "value" not in data:
vo_raise(E09, "MIN", config, pos)
self.min = data["value"]
self.min_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MIN", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "MAX":
if "value" not in data:
vo_raise(E09, "MAX", config, pos)
self.max = data["value"]
self.max_inclusive = data.get("inclusive", "yes")
warn_unknown_attrs(
"MAX", data.keys(), config, pos, ["value", "inclusive"]
)
elif tag == "OPTION":
if "value" not in data:
vo_raise(E09, "OPTION", config, pos)
xmlutil.check_token(data.get("name"), "name", config, pos)
self.options.append((data.get("name"), data.get("value")))
warn_unknown_attrs(
"OPTION", data.keys(), config, pos, ["value", "name"]
)
elif tag == "VALUES":
break
return self
def is_defaults(self):
"""
Are the settings on this ``VALUES`` element all the same as the
XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (
self.ref is None
and self.null is None
and self.ID is None
and self.max is None
and self.min is None
and self.options == []
)
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return "yes"
return "no"
if self.is_defaults():
return
if self.ref is not None:
w.element("VALUES", attrib=w.object_attrs(self, ["ref"]))
else:
with w.tag("VALUES", attrib=w.object_attrs(self, ["ID", "null", "ref"])):
if self.min is not None:
w.element(
"MIN",
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive),
)
if self.max is not None:
w.element(
"MAX",
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive),
)
for name, value in self.options:
w.element("OPTION", name=name, value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ["ID", "null"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta["min"] = {"value": self.min, "inclusive": self.min_inclusive}
if self.max is not None:
meta["max"] = {"value": self.max, "inclusive": self.max_inclusive}
if len(self.options):
meta["options"] = dict(self.options)
column.meta["values"] = meta
def from_table_column(self, column):
if column.info.meta is None or "values" not in column.info.meta:
return
meta = column.info.meta["values"]
for key in ["ID", "null"]:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if "min" in meta:
self.min = meta["min"]["value"]
self.min_inclusive = meta["min"]["inclusive"]
if "max" in meta:
self.max = meta["max"]["value"]
self.max_inclusive = meta["max"]["inclusive"]
if "options" in meta:
self._options = list(meta["options"].items())
class Field(
SimpleElement,
_IDProperty,
_NameProperty,
_XtypeProperty,
_UtypeProperty,
_UcdProperty,
):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = [
"ID",
"name",
"datatype",
"arraysize",
"ucd",
"unit",
"width",
"precision",
"utype",
"ref",
]
_attr_list_12 = _attr_list_11 + ["xtype"]
_element_name = "FIELD"
def __init__(
self,
votable,
ID=None,
name=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
ref=None,
type=None,
id=None,
xtype=None,
config=None,
pos=None,
**extra,
):
if config is None:
if hasattr(votable, "_get_version_checks"):
config = votable._get_version_checks()
else:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ["xtype"], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (
config.get("verify", "ignore") != "exception"
and name == "cprojection"
and ID == "cprojection"
and ucd == "VOX:WCS_CoordProjection"
and datatype == "double"
):
datatype = "char"
arraysize = "3"
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
if name is None:
if self._element_name == "PARAM" and not config.get("version_1_1_or_later"):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
"string": "char",
"unicodeString": "unicodeChar",
"int16": "short",
"int32": "int",
"int64": "long",
"float32": "float",
"float64": "double",
# The following appear in some Vizier tables
"unsignedInt": "long",
"unsignedShort": "int",
}
datatype_mapping.update(config.get("datatype_mapping", {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]), config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if not implicit and new_name != field.name:
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
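# Worked example of the renaming above (hypothetical IDs; ``vot`` assumed):
#
#     fields = [Field(vot, ID="flux", datatype="float"),
#               Field(vot, ID="flux", datatype="float"),
#               Field(vot, ID="flux", datatype="float")]
#     Field.uniqify_names(fields)
#     # IDs become "flux", "flux_2", "flux_3" (W32 is emitted for the renamed
#     # ones); colliding display names are suffixed " 2", " 3", ... and W33 is
#     # emitted when an explicitly-set name had to change.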
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""The datatype of the column [*required*].
Valid values (as defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get("version_1_1_or_later"):
warn_or_raise(E10, E10, self._element_name, self._config, self._pos)
datatype = "char"
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if arraysize is not None and not re.match(
r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize
):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
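# A few arraysize strings accepted by the pattern above (per the VOTable
# spec): "30" (fixed length), "30*" (variable, at most 30), "*" (unbounded),
# and "5x10" (a 5x10 multidimensional array). These are examples only.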
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == "VALUES":
self.values.__init__(
self._votable, self, config=config, pos=pos, **data
)
self.values.parse(iterator, config)
elif tag == "LINK":
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == "DESCRIPTION":
warn_unknown_attrs("DESCRIPTION", data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
attrib["unit"] = self.unit.to_string("cds")
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (
isinstance(self.converter, converters.FloatingPoint)
and self.converter.output_format != "{!r:>}"
):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta["_votable_string_dtype"] = "char"
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta["_votable_string_dtype"] = "unicodeChar"
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs["unit"] = column.info.unit
kwargs["name"] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and "links" in meta:
for link in meta["links"]:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ["value"]
_attr_list_12 = Field._attr_list_12 + ["value"]
_element_name = "PARAM"
def __init__(
self,
votable,
ID=None,
name=None,
value=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
type=None,
id=None,
config=None,
pos=None,
**extra,
):
self._value = value
Field.__init__(
self,
votable,
ID=ID,
name=name,
datatype=datatype,
arraysize=arraysize,
ucd=ucd,
unit=unit,
precision=precision,
utype=utype,
type=type,
id=id,
config=config,
pos=pos,
**extra,
)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ["ID", "equinox", "epoch", "system"]
_element_name = "COOSYS"
def __init__(
self,
ID=None,
equinox=None,
epoch=None,
system=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# COOSYS was deprecated in 1.2 but then re-instated in 1.3
if config.get("version_1_2_or_later") and not config.get(
"version_1_3_or_later"
):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs("COOSYS", extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get("version_1_1_or_later"):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def system(self):
"""Specifies the type of coordinate system.
Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in (
"eq_FK4",
"eq_FK5",
"ICRS",
"ecl_FK4",
"ecl_FK5",
"galactic",
"supergalactic",
"xy",
"barycentric",
"geo_app",
):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
(as e.g. "J2000" as the default "eq_FK5" or "B1950" as the
default "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, "equinox", self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, "epoch", self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
class TimeSys(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ["ID", "timeorigin", "timescale", "refposition"]
_element_name = "TIMESYS"
def __init__(
self,
ID=None,
timeorigin=None,
timescale=None,
refposition=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config["version_1_4_or_later"]:
warn_or_raise(W54, W54, config["version"], config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs(
"TIMESYS",
extra.keys(),
config,
pos,
["ID", "timeorigin", "timescale", "refposition"],
)
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (
timeorigin is not None
and timeorigin != "MJD-origin"
and timeorigin != "JD-origin"
):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
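# Example values (from the description above): ``timeorigin="MJD-origin"`` is
# equivalent to 2400000.5, ``timeorigin="JD-origin"`` to 0, and any other
# value must parse as a float, e.g. ``timeorigin="2455197.5"``.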
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(
self, table, ref, ucd=None, utype=None, config=None, pos=None, **extra
):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Look up the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(f"No field named '{self.ref}'", self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ["ref"]
_attr_list_12 = _attr_list_11 + ["ucd", "utype"]
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ["utype"], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Look up the :class:`Param` instance that this :class:`PARAMref`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(f"No params named '{self.ref}'", self._config, self._pos, KeyError)
class Group(
Element,
_IDProperty,
_NameProperty,
_UtypeProperty,
_UcdProperty,
_DescriptionProperty,
):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
This information is currently ignored by the vo package---that is,
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
table,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList((FieldRef, ParamRef, Group, Param))
warn_unknown_attrs("GROUP", extra.keys(), config, pos)
def __repr__(self):
return f"<GROUP>... {len(self._entries)} entries ...</GROUP>"
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
"FIELDref": self._add_fieldref,
"PARAMref": self._add_paramref,
"PARAM": self._add_param,
"GROUP": self._add_group,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "GROUP", config, pos)
self.description = data or None
elif tag == "GROUP":
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
"GROUP", attrib=w.object_attrs(self, ["ID", "name", "ref", "ucd", "utype"])
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
yield from entry.iter_fields_and_params()
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
yield from entry.iter_groups()
class Table(Element, _IDProperty, _NameProperty, _UcdProperty, _DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
If the Table contains no data, (for example, its enclosing
:class:`Resource` has :attr:`~Resource.type` == 'meta') *array*
will have zero-length.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
votable,
ID=None,
name=None,
ref=None,
ucd=None,
utype=None,
nrows=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
xmlutil.check_id(ref, "ref", config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = "tabledata"
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs("TABLE", extra.keys(), config, pos)
def __repr__(self):
s = repr(self.to_table())
if s.startswith("<Table"):
s = "<VO" + s[1:]
return s
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, "ref", self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ("TABLE", self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""The serialization format of the table [*required*].
Must be one of:
'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)
'fits' (FITS_).
Note that the 'fits' format, since it requires an external
file, can not be written out. Any file read in with 'fits'
format will be read out, by default, in 'tabledata' format.
See :ref:`astropy:votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == "fits":
vo_raise(
"fits format can not be written out, only read.",
self._config,
self._pos,
NotImplementedError,
)
if format == "binary2":
if not self._config["version_1_3_or_later"]:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config,
self._pos,
)
elif format not in ("tabledata", "binary"):
vo_raise(f"Invalid format '{format}'", self._config, self._pos)
self._format = format
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
Create a new array to hold the data based on the current set
of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype="O")
mask = np.zeros((nrows,), dtype="b")
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
new_type = (d[1][1] == "O" and "O") or "bool"
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
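# Assuming RESIZE_AMOUNT is 1.5 (as the comment above states), successive
# reallocations grow as 0 -> 512 -> 768 -> 1152 -> 1728 -> ..., i.e. one big
# initial step followed by roughly 50% growth per resize.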
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get("columns")
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get("table_number")
current_table_number = config.get("_current_table_number")
skip_table = False
if current_table_number is not None:
config["_current_table_number"] += 1
if table_number is not None and table_number != current_table_number:
skip_table = True
self._empty = True
table_id = config.get("table_id")
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
else:
if tag == "TABLE":
return self
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
else:
tag_mapping = {
"FIELD": self._add_field,
"PARAM": self._add_param,
"GROUP": self._add_group,
"LINK": self._add_link,
"INFO": self._add_info,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
if tag == "DATA":
if len(self.fields) == 0:
warn_or_raise(E25, E25, None, config, pos)
warn_unknown_attrs("DATA", data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "TABLE":
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
if np.any(columns < 0) or np.any(columns > len(fields)):
raise ValueError("Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(f"Columns '{columns}' not found in fields list")
else:
raise TypeError("Invalid columns list")
if (not skip_table) and (len(fields) > 0):
for start, tag, data, pos in iterator:
if start:
if tag == "TABLEDATA":
warn_unknown_attrs("TABLEDATA", data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config
)
break
elif tag == "BINARY":
warn_unknown_attrs("BINARY", data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos
)
break
elif tag == "BINARY2":
if not config["version_1_3_or_later"]:
warn_or_raise(W52, W52, config["version"], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos
)
break
elif tag == "FITS":
warn_unknown_attrs("FITS", data.keys(), config, pos, ["extnum"])
try:
extnum = int(data.get("extnum", 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == "DATA":
break
for start, tag, data, pos in iterator:
if start and tag == "INFO":
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("INFO", "TABLE", "1.2"), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == "TABLE":
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get("invalid", "exception")
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get("chunk_size", DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == "TR":
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = data.get("encoding", None) == "base64"
warn_unknown_attrs(tag, data.keys(), config, pos, ["encoding"])
else:
if tag == "TD":
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(data.encode("ascii"))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](buf.read)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
else:
try:
value, mask_value = parsers[i](
data, config, pos
)
except Exception as e:
vo_reraise(
e,
config,
pos,
"(in row {:d}, col '{}')".format(
len(array_chunk), fields[i].ID
),
)
except Exception as e:
if invalid == "exception":
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == "TR":
break
else:
self._add_unknown_tag(iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows : numrows + chunk_size] = array_chunk
array.mask[numrows : numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == "TABLEDATA":
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if self.nrows is not None and self.nrows >= 0 and self.nrows != numrows:
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
if "href" not in data:
have_local_stream = True
if data.get("encoding", None) != "base64":
warn_or_raise(
W38, W38, data.get("encoding", None), config, pos
)
else:
href = data["href"]
xmlutil.check_anyuri(href, config, pos)
encoding = data.get("encoding", None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode("ascii"))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, "
+ "ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "rb", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(
converters.bitarray_to_bool(mask_bits, len(fields))
)
# Ignore the mask for string columns (see issue 8995)
for i, f in enumerate(fields):
if row_mask_data[i] and (
f.datatype == "char" or f.datatype == "unicodeChar"
):
row_mask_data[i] = False
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e,
config,
pos,
f"(in row {numrows:d}, col '{fields[i].ID}')",
)
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == "STREAM":
if start:
warn_unknown_attrs(
"STREAM",
data.keys(),
config,
pos,
["type", "href", "actuate", "encoding", "expires", "rights"],
)
href = data["href"]
encoding = data.get("encoding", None)
else:
break
if not href.startswith(("http", "ftp", "file")):
vo_raise(
"The vo package only supports remote data through http, ftp or file",
self._config,
self._pos,
NotImplementedError,
)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == "gzip":
fd = gzip.GzipFile(href, "r", fileobj=fd)
elif encoding == "base64":
fd = codecs.EncodedFile(fd, "base64")
else:
vo_raise(
f"Unknown encoding type '{encoding}'",
self._config,
self._pos,
NotImplementedError,
)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get("tabledata_format")
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == "fits":
format = "tabledata"
with w.tag(
"TABLE",
attrib=w.object_attrs(self, ("ID", "name", "ref", "ucd", "utype", "nrows")),
):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups, self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs["version_1_2_or_later"]:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID=f"_g{index}")
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag("DATA"):
if format == "tabledata":
self._write_tabledata(w, **kwargs)
elif format == "binary":
self._write_binary(1, w, **kwargs)
elif format == "binary2":
self._write_binary(2, w, **kwargs)
if kwargs["version_1_2_or_later"]:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag("TABLEDATA"):
w._flush()
if _has_c_tabledata_writer and not kwargs.get("_debug_python_based_parser"):
supports_empty_values = [
field.converter.supports_empty_values(kwargs) for field in fields
]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write,
array.data,
array.mask,
fields,
supports_empty_values,
indent,
1 << 8,
)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [
(
i,
field.converter.output,
field.converter.supports_empty_values(kwargs),
)
for i, field in enumerate(fields)
]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID
),
)
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = "BINARY"
else:
tag_name = "BINARY2"
with w.tag(tag_name):
with w.tag("STREAM", encoding="base64"):
fields_basic = [
(i, field.converter.binoutput) for (i, field) in enumerate(fields)
]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
assert type(chunk) == bytes
except Exception as e:
vo_reraise(
e, additional=f"(in row {row:d}, col '{fields[i].ID}')"
)
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode("ascii"))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True`, use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
"""
from astropy.table import Table
meta = {}
for key in ["ID", "name", "ref", "ucd", "utype", "description"]:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
for i, name in enumerate(names):
new_name = name
i = 2
while new_name in unique_names:
new_name = f"{name}{i}"
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
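# Illustrative usage (the file name is hypothetical):
#
#     from astropy.io.votable import parse
#     votable = parse("catalog.xml")
#     tab = votable.get_first_table().to_table(use_names_over_ids=True)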
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
"""
kwargs = {}
for key in ["ID", "name", "ref", "ucd", "utype"]:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if "description" in table.meta:
new_table.description = table.meta["description"]
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table), mask=np.asarray(table.mask))
return new_table
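# Illustrative round trip in the other direction (a sketch only; the
# module-level helper ``astropy.io.votable.from_table`` builds the enclosing
# VOTableFile and Resource around this classmethod):
#
#     from astropy.io.votable import from_table, writeto
#     vot = from_table(astropy_table)     # ``astropy_table`` assumed to exist
#     writeto(vot, "out.xml")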
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
yield from self.params
yield from self.fields
for group in self.groups:
yield from group.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given ID or name.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD or PARAM",
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
yield from group.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_info(self):
yield from self.infos
class Resource(
Element, _IDProperty, _NameProperty, _UtypeProperty, _DescriptionProperty
):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(
self,
name=None,
ID=None,
utype=None,
type="results",
id=None,
config=None,
pos=None,
**kwargs,
):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs("RESOURCE", kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""The type of the resource [*required*].
Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ("results", "meta"):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""Dictionary of extra attributes of the RESOURCE_ element.
This is a dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. The specification explicitly allows
for extra attributes here, but nowhere else.
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system definitions (TIMESYS_ elements) for
the RESOURCE_. Must contain only `TimeSys` objects.
"""
return self._time_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
A list of groups.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
"TABLE": self._add_table,
"INFO": self._add_info,
"PARAM": self._add_param,
"GROUP": self._add_group,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"RESOURCE": self._add_resource,
"LINK": self._add_link,
"DESCRIPTION": self._ignore_add,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "RESOURCE", config, pos)
self.description = data or None
elif tag == "RESOURCE":
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ("ID", "type", "utype"))
attrs.update(self.extra_attributes)
with w.tag("RESOURCE", attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.links,
self.tables,
self.resources,
):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
yield from self.tables
for resource in self.resources:
yield from resource.iter_tables()
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
yield from self.params
for table in self.tables:
yield from table.iter_fields_and_params()
for resource in self.resources:
yield from resource.iter_fields_and_params()
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
def iter_timesys(self):
"""
Recursively iterates over all the TIMESYS_ elements in the
resource and nested resources.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
yield from self.infos
for table in self.tables:
yield from table.iter_info()
for resource in self.resources:
yield from resource.iter_info()
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
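Examples
--------
A minimal, illustrative sketch of building a file structure in memory
(the class names are the tree classes defined in this module; field
definitions and array filling are abbreviated)::

    votable = VOTableFile()
    resource = Resource()
    votable.resources.append(resource)
    table = Table(votable)
    resource.tables.append(table)
    table.fields.extend([
        Field(votable, name="filename", datatype="char", arraysize="*"),
        Field(votable, name="matrix", datatype="double", arraysize="2x2"),
    ])
    table.create_arrays(2)  # allocate room for two rows before filling them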
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.4"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._time_systems = HomogeneousList(TimeSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version == "1.0":
warnings.warn(
"VOTable 1.0 support is deprecated in astropy 4.3 and will be "
"removed in a future release",
AstropyDeprecationWarning,
)
elif (version != "1.0") and (version not in self._version_namespace_map):
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(f"'version' should be in ('1.0', '{allowed_from_map}').")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return f"<VOTABLE>... {n_tables} tables ...</VOTABLE>"
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in self._version_namespace_map:
allowed_from_map = "', '".join(self._version_namespace_map)
raise ValueError(
"astropy.io.votable only supports VOTable versions"
f" '{allowed_from_map}'"
)
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def time_systems(self):
"""
A list of time system descriptions for the file. Must
contain only `TimeSys` objects.
"""
return self._time_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_timesys(self, iterator, tag, data, config, pos):
timesys = TimeSys(config=config, pos=pos, **data)
self.time_systems.append(timesys)
timesys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get("version_1_2_or_later"):
warn_or_raise(W26, W26, ("GROUP", "VOTABLE", "1.2"), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _get_version_checks(self):
config = {}
config["version_1_1_or_later"] = util.version_compare(self.version, "1.1") >= 0
config["version_1_2_or_later"] = util.version_compare(self.version, "1.2") >= 0
config["version_1_3_or_later"] = util.version_compare(self.version, "1.3") >= 0
config["version_1_4_or_later"] = util.version_compare(self.version, "1.4") >= 0
return config
# Map VOTable version numbers to namespace URIs and schema information.
_version_namespace_map = {
# Version 1.0 isn't well-supported, but is allowed on parse (with a warning).
# It used DTD rather than schema, so this information would not be useful.
# By omitting 1.0 from this dict we can use the keys as the list of versions
# that are allowed in various other checks.
"1.1": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.1",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.1",
},
"1.2": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.2",
"schema_location_attr": "xsi:noNamespaceSchemaLocation",
"schema_location_value": "http://www.ivoa.net/xml/VOTable/v1.2",
},
# With 1.3 we'll be more explicit with the schema location.
# - xsi:schemaLocation uses the namespace name along with the URL
# to reference it.
# - For convenience, but somewhat confusingly, the namespace URIs
# are also usable URLs for accessing an applicable schema.
# However to avoid confusion, we'll use the explicit schema URL.
"1.3": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"
),
},
# With 1.4 namespace URIs stopped incrementing with minor version changes
# so we use the same URI as with 1.3. See this IVOA note for more info:
# http://www.ivoa.net/documents/Notes/XMLVers/20180529/
"1.4": {
"namespace_uri": "http://www.ivoa.net/xml/VOTable/v1.3",
"schema_location_attr": "xsi:schemaLocation",
"schema_location_value": (
"http://www.ivoa.net/xml/VOTable/v1.3"
" http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"
),
},
}
def parse(self, iterator, config):
config["_current_table_number"] = 0
for start, tag, data, pos in iterator:
if start:
if tag == "xml":
pass
elif tag == "VOTABLE":
if "version" not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config["version"] = self.version
else:
config["version"] = self._version = data["version"]
if config["version"].lower().startswith("v"):
warn_or_raise(W29, W29, config["version"], config, pos)
self._version = config["version"] = config["version"][1:]
if config["version"] not in self._version_namespace_map:
vo_warn(W21, config["version"], config, pos)
if "xmlns" in data:
ns_info = self._version_namespace_map.get(config["version"], {})
correct_ns = ns_info.get("namespace_uri")
if data["xmlns"] != correct_ns:
vo_warn(W41, (correct_ns, data["xmlns"]), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config.update(self._get_version_checks())
tag_mapping = {
"PARAM": self._add_param,
"RESOURCE": self._add_resource,
"COOSYS": self._add_coosys,
"TIMESYS": self._add_timesys,
"INFO": self._add_info,
"DEFINITIONS": self._add_definitions,
"DESCRIPTION": self._ignore_add,
"GROUP": self._add_group,
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos
)
elif tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, "VOTABLE", config, pos)
self.description = data or None
if not len(self.resources) and config["version_1_2_or_later"]:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(
self,
fd,
compressed=False,
tabledata_format=None,
_debug_python_based_parser=False,
_astropy_version=None,
):
"""
Write to an XML file.
Parameters
----------
fd : str or file-like
Where to write the file. If a file-like object, must be writable.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
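Examples
--------
Illustrative sketch (``votable`` is an existing `VOTableFile`; the output
path is arbitrary)::

    votable.to_xml("output.xml")                              # default format
    votable.to_xml("output.xml", tabledata_format="binary2")  # force BINARY2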
"""
if tabledata_format is not None:
if tabledata_format.lower() not in ("tabledata", "binary", "binary2"):
raise ValueError(f"Unknown format type '{format}'")
kwargs = {
"version": self.version,
"tabledata_format": tabledata_format,
"_debug_python_based_parser": _debug_python_based_parser,
"_group_number": 1,
}
kwargs.update(self._get_version_checks())
with util.convert_to_writable_filelike(fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
# Build the VOTABLE tag attributes.
votable_attr = {
"version": version,
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
ns_info = self._version_namespace_map.get(version, {})
namespace_uri = ns_info.get("namespace_uri")
if namespace_uri:
votable_attr["xmlns"] = namespace_uri
schema_location_attr = ns_info.get("schema_location_attr")
schema_location_value = ns_info.get("schema_location_value")
if schema_location_attr and schema_location_value:
votable_attr[schema_location_attr] = schema_location_value
with w.tag("VOTABLE", votable_attr):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [
self.coordinate_systems,
self.time_systems,
self.params,
self.infos,
self.resources,
]
if kwargs["version_1_2_or_later"]:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
yield from resource.iter_tables()
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""",
)
get_tables_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_tables",
"TABLE",
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""",
)
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(f"No table at index {idx:d} found in VOTABLE file.")
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
yield from resource.iter_fields_and_params()
get_field_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""",
)
get_fields_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""",
)
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
"iter_fields_and_params",
"FIELD",
"""
Looks up a FIELD_ element by the given ID_ or name.
""",
)
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_values",
"VALUES",
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""",
)
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
yield from table.iter_groups()
get_group_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""",
)
get_groups_by_utype = _lookup_by_attr_factory(
"utype",
False,
"iter_groups",
"GROUP",
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""",
)
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
yield from self.coordinate_systems
for resource in self.resources:
yield from resource.iter_coosys()
get_coosys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_coosys",
"COOSYS",
"""Looks up a COOSYS_ element by the given ID.""",
)
def iter_timesys(self):
"""
Recursively iterate over all TIMESYS_ elements in the VOTABLE_
file.
"""
yield from self.time_systems
for resource in self.resources:
yield from resource.iter_timesys()
get_timesys_by_id = _lookup_by_attr_factory(
"ID",
True,
"iter_timesys",
"TIMESYS",
"""Looks up a TIMESYS_ element by the given ID.""",
)
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
yield from self.infos
for resource in self.resources:
yield from resource.iter_info()
get_info_by_id = _lookup_by_attr_factory(
"ID", True, "iter_info", "INFO", """Looks up a INFO element by the given ID."""
)
get_infos_by_name = _lookup_by_attr_factory(
"name",
False,
"iter_info",
"INFO",
"""Returns all INFO children with the given name.""",
)
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table_id : str, optional
Set the given ID attribute on the returned Table instance.
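Examples
--------
A minimal, illustrative sketch::

    from astropy.table import Table as AstropyTable

    t = AstropyTable({"a": [1, 2, 3]})
    votable_file = VOTableFile.from_table(t, table_id="example_table")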
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
|
a4bcc13248940c0560460b4dcc790b745d35c52ddff309c6cadd405ca4eea3fb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
from astropy.utils import data
from astropy.utils.xml import iterparser
# LOCAL
from . import exceptions, tree
__all__ = [
"parse",
"parse_single_table",
"from_table",
"writeto",
"validate",
"reset_vo_warnings",
]
def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
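Examples
--------
A minimal, illustrative sketch (``votable.xml`` is a hypothetical local
file)::

    from astropy.io.votable import parse

    votable = parse("votable.xml")
    for table in votable.iter_tables():
        data = table.array  # numpy masked array holding the table rows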
"""
from . import conf, VERIFY_OPTIONS
invalid = invalid.lower()
if invalid not in ("exception", "mask"):
raise ValueError(
"accepted values of ``invalid`` are: ``'exception'`` or ``'mask'``."
)
if verify is None:
verify = conf.verify
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
"columns": columns,
"invalid": invalid,
"verify": verify,
"chunk_size": chunk_size,
"table_number": table_number,
"filename": filename,
"unit_format": unit_format,
"datatype_mapping": datatype_mapping,
}
if isinstance(source, str):
source = os.path.expanduser(source)
if filename is None and isinstance(source, str):
config["filename"] = source
with iterparser.get_xml_iterator(
source, _debug_python_based_parser=_debug_python_based_parser
) as iterator:
return tree.VOTableFile(config=config, pos=(1, 1)).parse(iterator, config)
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
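Examples
--------
A minimal, illustrative sketch (``votable.xml`` is a hypothetical local
file)::

    from astropy.io.votable import parse_single_table

    table = parse_single_table("votable.xml")
    astropy_table = table.to_table()  # convert to an astropy.table.Table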
"""
if kwargs.get("table_number") is None:
kwargs["table_number"] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
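Examples
--------
A minimal, illustrative sketch (the output path is arbitrary)::

    from astropy.table import Table
    from astropy.io.votable import writeto

    t = Table({"a": [1, 2, 3]})
    writeto(t, "output.xml", tabledata_format="binary2")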
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance"
)
table.to_xml(
file, tabledata_format=tabledata_format, _debug_python_based_parser=True
)
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file, or a `~pathlib.Path` object
pointing to one. If a file-like object, it must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
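Examples
--------
A minimal, illustrative sketch (``votable.xml`` is a hypothetical local
file)::

    from astropy.io.votable import validate

    is_clean = validate("votable.xml")             # report printed to stdout
    report = validate("votable.xml", output=None)  # report returned as a str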
"""
from astropy.utils.console import color_print, print_code_line
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
if isinstance(source, str):
source = os.path.expanduser(source)
with data.get_readable_fileobj(source, encoding="binary") as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, "name"):
filename = source.name
elif hasattr(source, "url"):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify="warn", filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [
str(x.message)
for x in warning_lines
if issubclass(x.category, exceptions.VOWarning)
] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w["is_something"]:
output.write(w["message"])
output.write("\n\n")
else:
line = xml_lines[w["nline"] - 1]
warning = w["warning"]
if w["is_warning"]:
color = "yellow"
else:
color = "red"
color_print(
f"{w['nline']:d}: ",
"",
warning or "EXC",
color,
": ",
"",
textwrap.fill(
w["message"],
initial_indent=" ",
subsequent_indent=" ",
).lstrip(),
file=output,
)
print_code_line(line, w["nchar"], file=output)
output.write("\n")
else:
output.write("astropy.io.votable found no violations.\n\n")
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(filename, version)
if success != 0:
output.write("xmllint schema violations:\n\n")
output.write(stderr.decode("utf-8"))
else:
output.write("xmllint passed\n")
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
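Examples
--------
A minimal, illustrative sketch (the output path is arbitrary)::

    from astropy.table import Table
    from astropy.io.votable import from_table, writeto

    t = Table({"a": [1, 2, 3]})
    votable = from_table(t, table_id="example_table")
    writeto(votable, "example.xml")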
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
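Examples
--------
A minimal, illustrative sketch (``candidate.xml`` is a hypothetical local
file)::

    from astropy.io.votable import is_votable, parse

    if is_votable("candidate.xml"):
        votable = parse("candidate.xml")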
"""
if isinstance(source, str):
source = os.path.expanduser(source)
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != "xml":
return False
break
for start, tag, d, pos in iterator:
if tag != "VOTABLE":
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
try:
del module.__warningregistry__
except AttributeError:
pass
|
c4d0bb2faf901e62e8910e1bdb9c912d55907e31d528e5d9f3ba9168226dc549 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package reads and writes data formats used by the Virtual
Observatory (VO) initiative, particularly the VOTable XML format.
"""
from astropy import config as _config
from .exceptions import (
IOWarning,
UnimplementedWarning,
VOTableChangeWarning,
VOTableSpecError,
VOTableSpecWarning,
VOWarning,
)
from .table import from_table, is_votable, parse, parse_single_table, validate, writeto
__all__ = [
"Conf",
"conf",
"parse",
"parse_single_table",
"validate",
"from_table",
"is_votable",
"writeto",
"VOWarning",
"VOTableChangeWarning",
"VOTableSpecWarning",
"UnimplementedWarning",
"IOWarning",
"VOTableSpecError",
]
VERIFY_OPTIONS = ["ignore", "warn", "exception"] # First one is default
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable`.
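Examples
--------
An illustrative sketch of adjusting the ``verify`` option, globally or
temporarily (``votable.xml`` is a hypothetical local file)::

    from astropy.io import votable

    votable.conf.verify = "warn"            # set for the whole session
    with votable.conf.set_temp("verify", "exception"):
        vot = votable.parse("votable.xml")  # strict parsing inside the block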
"""
verify = _config.ConfigItem(
VERIFY_OPTIONS,
"Can be 'exception' (treat fixable violations of the VOTable spec as "
"exceptions), 'warn' (show warnings for VOTable spec violations), or "
"'ignore' (silently ignore VOTable spec violations)",
)
conf = Conf()
|
853c004f372f1cc7f2d06428bec4bdf83ef39b33bdbe0fc31936350ca896743f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""VOTable exceptions and warnings.
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module and the ``astropy.io.votable.exceptions.conf.max_warnings``
configuration item. Most of these are of the type `VOTableSpecWarning`.
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
__all__ = [
"Conf",
"conf",
"warn_or_raise",
"vo_raise",
"vo_reraise",
"vo_warn",
"warn_unknown_attrs",
"parse_vowarning",
"VOWarning",
"VOTableChangeWarning",
"VOTableSpecWarning",
"UnimplementedWarning",
"IOWarning",
"VOTableSpecError",
]
# NOTE: Cannot put this in __init__.py due to circular import.
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.votable.exceptions`.
"""
max_warnings = _config.ConfigItem(
10,
"Number of times the same type of warning is displayed before being suppressed",
cfgtype="integer",
)
conf = Conf()
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ("?", "?")
filename = config.get("filename", "?")
return f"{filename}:{pos[0]}:{pos[1]}: {name}: {message}"
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault("_warning_counts", {}).setdefault(warning_class, 0)
config["_warning_counts"][warning_class] += 1
message_count = config["_warning_counts"][warning_class]
if message_count <= conf.max_warnings:
if message_count == conf.max_warnings:
warning.formatted_message += (
" (suppressing further warnings of this type...)"
)
warn(warning, stacklevel=stacklevel + 1)
def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
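Examples
--------
Illustrative sketch, mirroring how the helper is called elsewhere in this
package (the position tuple is made up)::

    # With verify='warn' this emits a W17 warning; with
    # verify='exception' the same call raises W17 instead.
    warn_or_raise(W17, W17, "RESOURCE", {"verify": "warn"}, (10, 2))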
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling vo_warn without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel + 1)
_warning_pat = re.compile(
r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): "
+ r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"
)
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
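Examples
--------
Illustrative sketch (the input string follows the format produced by
``_format_message``)::

    w = parse_vowarning("file.xml:5:10: W27: COOSYS deprecated in VOTable 1.2")
    # w["warning"] == "W27", w["nline"] == 5, w["nchar"] == 10,
    # w["is_warning"] is True, and w["message"] holds the trailing text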
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
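Examples
--------
Illustrative sketch of how the message template is filled in (``W18`` is one
of the warning classes defined below; the filename and position are made
up)::

    w = W18(("10", "5"), config={"filename": "file.xml"}, pos=(12, 4))
    str(w)
    # 'file.xml:12:4: W18: TABLE specified nrows=10, but table contains 5 rows'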
"""
default_args = ()
message_template = ""
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args,)
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos
)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""Array uses commas rather than whitespace.
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` can support this convention depending on the
:ref:`astropy:verifying-votables` setting.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""Nonstandard XML id.
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <https://www.w3.org/TR/xml-names/>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ("x", "y")
class W03(VOTableChangeWarning):
"""Implicitly generating an ID from a name.
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present (since it is not required by
the specification), ``name`` is used instead. In that case, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ("x", "y")
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ("x",)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<https://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ("x",)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<https://vizier.unistra.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ("x", "explanation")
class W07(VOTableSpecWarning):
"""Invalid astroYear.
An astro year field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ("x", "y")
class W08(VOTableSpecWarning):
"""
To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ("x",)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__. If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ("x",)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.unistra.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes"
)
default_args = ("x",)
class W13(VOTableSpecWarning):
"""Invalid VOTable datatype.
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ("x", "y")
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``
``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ("x",)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ("x",)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = "TABLE specified nrows={}, but table contains {} rows"
default_args = ("x", "y")
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
"The fields defined in the VOTable do not match those in the "
+ "embedded FITS file"
)
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = "No version number specified in file. Assuming {}"
default_args = ("1.1",)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2, 1.3, or 1.4.
"""
message_template = (
"astropy.io.votable is designed for VOTable version 1.1, 1.2, 1.3,"
" and 1.4, but this file is {}"
)
default_args = ("x",)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = "The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring"
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possibly out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ("x",)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
newer version of ``astropy.io.votable``. This may cause problems or limited
features performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = (
"The VO catalog database is for a later version of astropy.io.votable"
)
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ("service", "...")
class W26(VOTableSpecWarning):
"""
The given element was not supported inside of the given element
until the specified VOTable version, however the version declared
in the file is for an earlier version. These attributes may not
be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ("child", "parent", "X.X")
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
<http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
specified VOTable version, however the version declared in the file is
for an earlier version. These attributes may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ("attribute", "element", "X.X")
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
when the only supported form in the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ("v1.0",)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ("x",)
class W31(VOTableSpecWarning):
"""
Since NaN's can not be represented in integer fields directly, a null
value must be specified in the FIELD descriptor to support reading
NaN's from the tabledata.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ("x", "x_2")
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ("x", "y")
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ("x",)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ("x",)
class W37(UnimplementedWarning):
"""
The 3 datatypes defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ("x",)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ("x",)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `NOIRLab Astro Data Archive <https://astroarchive.noirlab.edu/>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected '{}', got '{}'"
)
default_args = ("x", "y")
class W42(VOTableSpecWarning):
"""The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""Referenced elements should be defined before referees.
From the VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ("element", "x")
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ("element",)
class W45(VOWarning, ValueError):
"""Invalid content-role attribute.
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ("x",)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ("char or unicode", "x")
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ("attribute", "element")
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Units in the VO, Version 1.0
<https://www.ivoa.net/documents/VOUnits>`_ (VOTable version >= 1.4)
or `Standards for Astronomical Catalogues, Version 2.0
<https://cdsarc.cds.unistra.fr/doc/catstd-3.2.htx>`_ (version < 1.4).
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ("x",)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ("x", "n-bit")
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'"
)
default_args = ("1.2",)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = "VOTABLE element must contain at least one RESOURCE element."
default_args = ()
class W54(VOTableSpecWarning):
"""
The TIMESYS element was introduced in VOTable 1.4. It should
not be present in files marked as an earlier version.
"""
message_template = (
"The TIMESYS element was introduced in VOTable 1.4, but "
"this file is declared as version '{}'"
)
default_args = ("1.3",)
class W55(VOTableSpecWarning):
"""
When non-ASCII characters are detected when reading
a TABLEDATA value for a FIELD with ``datatype="char"``, we
can issue this warning.
"""
message_template = (
'FIELD ({}) has datatype="char" but contains non-ASCII value ({})'
)
default_args = ("", "")
class E01(VOWarning, ValueError):
"""Invalid size specifier for a field.
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters, but given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ("x", "char/unicode", "y")
class E02(VOWarning, ValueError):
"""Incorrect number of elements in array.
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. Expected multiple of {}, got {}"
)
default_args = ("x", "y")
class E03(VOWarning, ValueError):
"""Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ("x",)
class E04(VOWarning, ValueError):
"""A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ("x",)
class E05(VOWarning, ValueError):
r"""Invalid boolean value.
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ("x",)
class E06(VOWarning, ValueError):
"""Unknown datatype on a field.
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ("x", "y")
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ("x",)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ("x",)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ("FIELD",)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10^-5).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ("x",)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ("x",)
class E13(VOWarning, ValueError):
r"""Invalid arraysize attribute.
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
about 2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ("x",)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""Incorrect ``system`` attribute on COOSYS element.
The ``system`` attribute must be one of the following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ("x",)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ("x",)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ("x",)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ("x", "y")
class E22(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
class E23(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ("x",)
class E24(VOWarning, ValueError):
"""
Non-ASCII unicode values should not be written when the FIELD ``datatype="char"``,
and cannot be written in BINARY or BINARY2 serialization.
"""
message_template = (
'Attempt to write non-ASCII value ({}) to FIELD ({}) which has datatype="char"'
)
default_args = ("", "")
class E25(VOTableSpecWarning):
"""
A VOTable cannot have a DATA section without any defined FIELD; DATA will be ignored.
"""
message_template = "No FIELDs are defined; DATA section will be ignored."
def _get_warning_and_exception_classes(prefix):
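# Collect (name, class) pairs for every warning/exception class in this module
# whose name matches the given prefix followed by two digits; for example,
# _get_warning_and_exception_classes("E") gathers the E-prefixed exception
# classes defined above, sorted by name.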
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(f".. _{name}:\n\n")
msg = f"{cls.__name__}: {cls.get_short_name()}"
if not isinstance(msg, str):
msg = msg.decode("utf-8")
out.write(msg)
out.write("\n")
out.write("~" * len(msg))
out.write("\n\n")
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode("utf-8")
out.write(dedent(doc))
out.write("\n\n")
return out.getvalue()
warnings = generate_set("W")
exceptions = generate_set("E")
return {"warnings": warnings, "exceptions": exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes("W")])
__all__.extend([x[0] for x in _get_warning_and_exception_classes("E")])
"""
This module contains low level helper functions for compressing and
decompressing buffer for the Tiled Table Compression algorithms as specified in
the FITS 4 standard.
"""
import sys
from math import prod
import numpy as np
from astropy.io.fits.hdu.base import BITPIX2DTYPE
from .codecs import PLIO1, Gzip1, Gzip2, HCompress1, NoCompress, Rice1
from .quantization import DITHER_METHODS, QuantizationFailedException, Quantize
from .utils import _data_shape, _iter_array_tiles, _tile_shape
ALGORITHMS = {
"GZIP_1": Gzip1,
"GZIP_2": Gzip2,
"RICE_1": Rice1,
"RICE_ONE": Rice1,
"PLIO_1": PLIO1,
"HCOMPRESS_1": HCompress1,
"NOCOMPRESS": NoCompress,
}
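# Value written in place of NaNs once a floating-point tile has been quantized
# to integers (and recorded in the ZBLANK keyword); -(2**31) is the minimum of
# a 32-bit signed integer.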
DEFAULT_ZBLANK = -2147483648
__all__ = [
"compress_image_data",
"decompress_image_data_section",
]
def _decompress_tile(buf, *, algorithm: str, **settings):
"""
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).decode(buf)
def _compress_tile(buf, *, algorithm: str, **settings):
"""
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).encode(buf)
def _header_to_settings(header):
"""
Extract the settings which are constant given a header
"""
settings = {}
compression_type = header["ZCMPTYPE"]
if compression_type == "GZIP_2":
settings["itemsize"] = abs(header["ZBITPIX"]) // 8
elif compression_type in ("RICE_1", "RICE_ONE"):
settings["blocksize"] = _get_compression_setting(header, "BLOCKSIZE", 32)
settings["bytepix"] = _get_compression_setting(header, "BYTEPIX", 4)
elif compression_type == "HCOMPRESS_1":
settings["bytepix"] = 8
settings["scale"] = int(_get_compression_setting(header, "SCALE", 0))
settings["smooth"] = _get_compression_setting(header, "SMOOTH", 0)
return settings
def _update_tile_settings(settings, compression_type, actual_tile_shape):
"""
Update the settings with tile-specific settings
"""
if compression_type in ("PLIO_1", "RICE_1", "RICE_ONE"):
# We have to calculate the tilesize from the shape of the tile not the
# header, so that it's correct for edge tiles etc.
settings["tilesize"] = prod(actual_tile_shape)
elif compression_type == "HCOMPRESS_1":
# HCOMPRESS requires 2D tiles, so to find the shape of the 2D tile we
# need to ignore all length 1 tile dimensions
# Also cfitsio expects the tile shape in C order
shape_2d = tuple(nd for nd in actual_tile_shape if nd != 1)
if len(shape_2d) != 2:
raise ValueError(f"HCOMPRESS expects two dimensional tiles, got {shape_2d}")
settings["nx"] = shape_2d[0]
settings["ny"] = shape_2d[1]
return settings
def _finalize_array(tile_buffer, *, bitpix, tile_shape, algorithm, lossless):
"""
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape.
"""
tile_size = prod(tile_shape)
if algorithm.startswith("GZIP") or algorithm == "NOCOMPRESS":
# This algorithm is taken from fitsio
# https://github.com/astropy/astropy/blob/a8cb1668d4835562b89c0d0b3448ac72ca44db63/cextern/cfitsio/lib/imcompress.c#L6345-L6388
tile_bytesize = len(tile_buffer)
if tile_bytesize == tile_size * 2:
dtype = ">i2"
elif tile_bytesize == tile_size * 4:
if bitpix < 0 and lossless:
dtype = ">f4"
else:
dtype = ">i4"
elif tile_bytesize == tile_size * 8:
if bitpix < 0 and lossless:
dtype = ">f8"
else:
dtype = ">i8"
else:
# Just return the raw bytes
dtype = ">u1"
tile_data = np.asarray(tile_buffer).view(dtype).reshape(tile_shape)
else:
# For RICE_1 compression the tiles that are on the edge can end up
# being padded, so we truncate excess values
if algorithm in ("RICE_1", "RICE_ONE", "PLIO_1") and tile_size < len(
tile_buffer
):
tile_buffer = tile_buffer[:tile_size]
if tile_buffer.data.format == "b":
# NOTE: this feels like a Numpy bug - need to investigate
tile_data = np.asarray(tile_buffer, dtype=np.uint8).reshape(tile_shape)
else:
tile_data = np.asarray(tile_buffer).reshape(tile_shape)
return tile_data
def _check_compressed_header(header):
# NOTE: this could potentially be moved up into CompImageHDU, e.g. in a
# _verify method.
# Check for overflows which might cause issues when calling C code
for kw in ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.intc).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["ZNAXIS"] + 1):
for kw_name in ["ZNAXIS", "ZTILE"]:
kw = f"{kw_name}{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["NAXIS"] + 1):
kw = f"NAXIS{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["TNULL1", "PCOUNT", "THEAP"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["ZVAL3"]:
if kw in header:
if header[kw] > np.finfo(np.float32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
# Validate data types
for kw in ["ZSCALE", "ZZERO", "TZERO1", "TSCAL1"]:
if kw in header:
if not np.isreal(header[kw]):
raise TypeError(f"{kw} should be floating-point")
for kw in ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]:
if kw in header:
if not isinstance(header[kw], str):
raise TypeError(f"{kw} should be a string")
for kw in ["ZDITHER0"]:
if kw in header:
if not np.isreal(header[kw]) or not float(header[kw]).is_integer():
raise TypeError(f"{kw} should be an integer")
if "TFORM1" in header:
for valid in ["1PB", "1PI", "1PJ", "1QB", "1QI", "1QJ"]:
if header["TFORM1"].startswith(valid):
break
else:
raise RuntimeError(f"Invalid TFORM1: {header['TFORM1']}")
# Check values
for kw in ["TFIELDS", "PCOUNT"] + [
f"NAXIS{idx + 1}" for idx in range(header["NAXIS"])
]:
if kw in header:
if header[kw] < 0:
raise ValueError(f"{kw} should not be negative.")
for kw in ["ZNAXIS", "TFIELDS"]:
if kw in header:
if header[kw] < 0 or header[kw] > 999:
raise ValueError(f"{kw} should be in the range 0 to 999")
if header["ZBITPIX"] not in [8, 16, 32, 64, -32, -64]:
raise ValueError(f"Invalid value for BITPIX: {header['ZBITPIX']}")
if header["ZCMPTYPE"] not in ALGORITHMS:
raise ValueError(f"Unrecognized compression type: {header['ZCMPTYPE']}")
# Check that certain keys are present
header["ZNAXIS"]
header["ZBITPIX"]
def _get_compression_setting(header, name, default):
# Settings for the various compression algorithms are stored in pairs of
# keywords called ZNAME? and ZVAL? - a given compression setting could be
# in any ZNAME? so we need to check through all the possible ZNAMEs which
# one matches the required setting.
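# For example (illustrative values), a Rice-compressed HDU might carry
#   ZNAME1 = 'BLOCKSIZE', ZVAL1 = 32
#   ZNAME2 = 'BYTEPIX',   ZVAL2 = 4
# in which case _get_compression_setting(header, "bytepix", 4) returns ZVAL2.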
for i in range(1, 1000):
if f"ZNAME{i}" not in header:
break
if header[f"ZNAME{i}"].lower() == name.lower():
return header[f"ZVAL{i}"]
return default
def _column_dtype(compressed_coldefs, column_name):
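# Map the third character of the column's TFORM code (e.g. '1PB', '1QI', '1QJ')
# to the numpy dtype of the heap data it describes: 'B' -> uint8,
# 'I' -> big-endian int16, 'J' -> big-endian int32.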
tform = compressed_coldefs[column_name].format
if tform[2] == "B":
dtype = np.uint8
elif tform[2] == "I":
dtype = ">i2"
elif tform[2] == "J":
dtype = ">i4"
return np.dtype(dtype)
def _get_data_from_heap(hdu, size, offset, dtype, heap_cache=None):
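# Read `size` elements of `dtype` starting `offset` bytes into the binary table
# heap, either directly from the file or, when `heap_cache` (the full heap as a
# uint8 array) is given, by slicing the cached buffer.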
if heap_cache is None:
return hdu._get_raw_data(size, dtype, hdu._data_offset + hdu._theap + offset)
else:
itemsize = dtype.itemsize
data = heap_cache[offset : offset + size * itemsize]
if itemsize > 1:
return data.view(dtype)
else:
return data
def decompress_image_data_section(
compressed_data,
compression_type,
compressed_header,
bintable,
first_tile_index,
last_tile_index,
):
"""
Decompress the data in a `~astropy.io.fits.CompImageHDU`.
Parameters
----------
compressed_data : `~astropy.io.fits.FITS_rec`
The compressed data
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
bintable : `~astropy.io.fits.BinTableHDU`
The binary table HDU, used to access the raw heap data
first_tile_index : iterable
The indices of the first tile to decompress along each dimension
last_tile_index : iterable
The indices of the last tile to decompress along each dimension
Returns
-------
data : `numpy.ndarray`
The decompressed data array.
"""
compressed_coldefs = compressed_data._coldefs
_check_compressed_header(compressed_header)
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
first_array_index = first_tile_index * tile_shape
last_array_index = (last_tile_index + 1) * tile_shape
last_array_index = np.minimum(data_shape, last_array_index)
buffer_shape = tuple((last_array_index - first_array_index).astype(int))
image_data = np.empty(
buffer_shape, dtype=BITPIX2DTYPE[compressed_header["ZBITPIX"]]
)
quantized = "ZSCALE" in compressed_data.dtype.names
if image_data.size == 0:
return image_data
settings = _header_to_settings(compressed_header)
zbitpix = compressed_header["ZBITPIX"]
dither_method = DITHER_METHODS[compressed_header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = compressed_header.get("ZDITHER0", 0)
# NOTE: in the following and below we convert the column to a Numpy array
# for performance reasons, as accessing rows from a FITS_rec column is
# otherwise slow.
compressed_data_column = np.array(compressed_data["COMPRESSED_DATA"])
compressed_data_dtype = _column_dtype(compressed_coldefs, "COMPRESSED_DATA")
if "ZBLANK" in compressed_coldefs.dtype.names:
zblank_column = np.array(compressed_data["ZBLANK"])
else:
zblank_column = None
if "ZSCALE" in compressed_coldefs.dtype.names:
zscale_column = np.array(compressed_data["ZSCALE"])
else:
zscale_column = None
if "ZZERO" in compressed_coldefs.dtype.names:
zzero_column = np.array(compressed_data["ZZERO"])
else:
zzero_column = None
zblank_header = compressed_header.get("ZBLANK", None)
gzip_compressed_data_column = None
gzip_compressed_data_dtype = None
# If all the data is requested, read in all the heap.
if tuple(buffer_shape) == tuple(data_shape):
heap_cache = bintable._get_raw_data(
compressed_header["PCOUNT"],
np.uint8,
bintable._data_offset + bintable._theap,
)
else:
heap_cache = None
for row_index, tile_slices in _iter_array_tiles(
data_shape, tile_shape, first_tile_index, last_tile_index
):
# For tiles near the edge, the tile shape from the header might not be
# correct so we have to pass the shape manually.
actual_tile_shape = image_data[tile_slices].shape
settings = _update_tile_settings(settings, compression_type, actual_tile_shape)
if compressed_data_column[row_index][0] == 0:
if gzip_compressed_data_column is None:
gzip_compressed_data_column = np.array(
compressed_data["GZIP_COMPRESSED_DATA"]
)
gzip_compressed_data_dtype = _column_dtype(
compressed_coldefs, "GZIP_COMPRESSED_DATA"
)
# When quantizing floating point data, sometimes the data will not
# quantize efficiently. In these cases the raw floating point data can
# be losslessly GZIP compressed and stored in the `GZIP_COMPRESSED_DATA`
# column.
cdata = _get_data_from_heap(
bintable,
*gzip_compressed_data_column[row_index],
gzip_compressed_data_dtype,
heap_cache=heap_cache,
)
tile_buffer = _decompress_tile(cdata, algorithm="GZIP_1")
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm="GZIP_1",
lossless=True,
)
else:
cdata = _get_data_from_heap(
bintable,
*compressed_data_column[row_index],
compressed_data_dtype,
heap_cache=heap_cache,
)
if compression_type == "GZIP_2":
# Decompress with GZIP_1 just to find the total number of
# elements in the uncompressed data.
# TODO: find a way to avoid doing this for all tiles
tile_data = np.asarray(_decompress_tile(cdata, algorithm="GZIP_1"))
settings["itemsize"] = tile_data.size // int(prod(actual_tile_shape))
tile_buffer = _decompress_tile(
cdata, algorithm=compression_type, **settings
)
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm=compression_type,
lossless=not quantized,
)
if zblank_column is None:
zblank = zblank_header
else:
zblank = zblank_column[row_index]
if zblank is not None:
blank_mask = tile_data == zblank
if quantized:
q = Quantize(
row=(row_index + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=None,
bitpix=zbitpix,
)
tile_data = np.asarray(
q.decode_quantized(
tile_data, zscale_column[row_index], zzero_column[row_index]
)
).reshape(actual_tile_shape)
if zblank is not None:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
tile_data[blank_mask] = np.nan
image_data[tile_slices] = tile_data
return image_data
def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(image_data, np.ndarray):
raise TypeError("Image data must be a numpy.ndarray")
_check_compressed_header(compressed_header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(compressed_header, "noisebit", 0)
settings = _header_to_settings(compressed_header)
for irow, tile_slices in _iter_array_tiles(data_shape, tile_shape):
tile_data = image_data[tile_slices]
settings = _update_tile_settings(settings, compression_type, tile_data.shape)
quantize = "ZSCALE" in compressed_coldefs.dtype.names
if tile_data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[
compressed_header.get("ZQUANTIZ", "NO_DITHER")
]
dither_seed = compressed_header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=compressed_header["ZBITPIX"],
)
original_shape = tile_data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(tile_data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
tile_data = tile_data.copy()
if np.all(nan_mask):
tile_data[nan_mask] = 0
else:
tile_data[nan_mask] = np.nanmin(tile_data)
try:
tile_data, scale, zero = q.encode_quantized(tile_data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
tile_data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
tile_data = np.asarray(tile_data).reshape(original_shape)
if any_nan:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
tile_data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
if gzip_fallback[-1]:
cbytes = _compress_tile(tile_data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(tile_data, algorithm=compression_type, **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
compressed_header["ZBLANK"] = zblank
table = np.zeros(
len(compressed_bytes), dtype=compressed_coldefs.dtype.newbyteorder(">")
)
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
# the real size - not clear if this is deliberate or a bug somewhere.
if compression_type == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if compression_type == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2", copy=False).tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
heap = table_bytes + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import numpy as np
from astropy.io.fits.column import FITS2NUMPY, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec, FITS_record
from astropy.io.fits.util import _is_int, _is_pseudo_integer, _pseudo_zero
from astropy.utils import lazyproperty
from .base import DELAYED, DTYPE2BITPIX
from .image import PrimaryHDU
from .table import _TableLikeHDU
class Group(FITS_record):
"""
One group of the random group data.
"""
def __init__(self, input, row=0, start=None, end=None, step=None, base=None):
super().__init__(input, row, start, end, step, base)
@property
def parnames(self):
return self.array.parnames
@property
def data(self):
# The last column in the coldefs is the data portion of the group
return self.field(self.array._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter value.
"""
if _is_int(parname):
result = self.array[self.row][parname]
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.array[self.row][indx[0]]
# if more than one group parameter has the same name
else:
result = self.array[self.row][indx[0]].astype("f8")
for i in indx[1:]:
result += self.array[self.row][i]
return result
def setpar(self, parname, value):
"""
Set the group parameter value.
"""
# TODO: It would be nice if, instead of requiring a multi-part value to
# be an array, there were an *option* to automatically split the value
# into multiple columns if it doesn't already fit in the array data
# type.
if _is_int(parname):
self.array[self.row][parname] = value
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
self.array[self.row][indx[0]] = value
# if more than one group parameter has the same name, the
# value must be a list (or tuple) containing arrays
else:
if isinstance(value, (list, tuple)) and len(indx) == len(value):
for i in range(len(indx)):
self.array[self.row][indx[i]] = value[i]
else:
raise ValueError(
"Parameter value must be a sequence with "
"{} arrays/numbers.".format(len(indx))
)
class GroupData(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(
cls,
input=None,
bitpix=None,
pardata=None,
parnames=[],
bscale=None,
bzero=None,
parbscales=None,
parbzeros=None,
):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of array
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = [f"PAR{idx + 1}" for idx in range(npars)]
if len(parnames) != npars:
raise ValueError(
"The number of parameter data arrays does "
"not match the number of parameters."
)
unique_parnames = _unique_parnames(parnames + ["DATA"])
if bitpix is None:
bitpix = DTYPE2BITPIX[input.dtype.name]
fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = f"{input.shape[1:]}{format}"
formats = ",".join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [
Column(
name=unique_parnames[idx],
format=fits_fmt,
bscale=parbscales[idx],
bzero=parbzeros[idx],
)
for idx in range(npars)
]
cols.append(
Column(
name=unique_parnames[-1],
format=fits_fmt,
bscale=bscale,
bzero=bzero,
)
)
coldefs = ColDefs(cols)
self = FITS_rec.__new__(
cls,
np.rec.array(None, formats=formats, names=coldefs.names, shape=gcount),
)
# By default the data field will just be 'DATA', but it may be
# uniquified if 'DATA' is already used by one of the group names
self._data_field = unique_parnames[-1]
self._coldefs = coldefs
self.parnames = parnames
for idx, name in enumerate(unique_parnames[:-1]):
column = coldefs[idx]
# Note: _get_scale_factors is used here and in other cases
# below to determine whether the column has non-default
# scale/zero factors.
# TODO: Find a better way to do this than using this interface
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(name, pardata[idx])
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
column = coldefs[self._data_field]
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(self._data_field, input)
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super().__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
"""
The raw group data represented as a multi-dimensional `numpy.ndarray`
array.
"""
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
result = self.field(parname)
else:
indx = self._unique[parname.upper()]
if len(indx) == 1:
result = self.field(indx[0])
# if more than one group parameter has the same name
else:
result = self.field(indx[0]).astype("f8")
for i in indx[1:]:
result += self.field(i)
return result
class GroupsHDU(PrimaryHDU, _TableLikeHDU):
"""
FITS Random Groups HDU class.
See the :ref:`astropy:random-groups` section in the Astropy documentation
for more details on working with this type of HDU.
"""
_bitpix2tform = {8: "B", 16: "I", 32: "J", 64: "K", -32: "E", -64: "D"}
_data_type = GroupData
_data_field = "DATA"
"""
The name of the table record array field that will contain the group data
for each group; 'DATA' by default, but may be preceded by any number of
underscores if 'DATA' is already a parameter name
"""
def __init__(self, data=None, header=None):
super().__init__(data=data, header=header)
if data is not DELAYED:
self.update_header()
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
self._header["NAXIS"] = 1
self._header.set("NAXIS1", 0, after="NAXIS")
@classmethod
def match_header(cls, header):
keyword = header.cards[0].keyword
return keyword == "SIMPLE" and "GROUPS" in header and header["GROUPS"] is True
@lazyproperty
def data(self):
"""
The data of a random group FITS file will be like a binary table's
data.
"""
if self._axes == [0]:
return
data = self._get_tbdata()
data._coldefs = self.columns
data.parnames = self.parnames
del self.columns
return data
@lazyproperty
def parnames(self):
"""The names of the group parameters as described by the header."""
pcount = self._header["PCOUNT"]
# The FITS standard doesn't really say what to do if a parname is
# missing, so for now just assume that won't happen
return [self._header["PTYPE" + str(idx + 1)] for idx in range(pcount)]
@lazyproperty
def columns(self):
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
format = self._bitpix2tform[self._header["BITPIX"]]
pcount = self._header["PCOUNT"]
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
bscales.append(self._header.get("PSCAL" + str(idx + 1), None))
bzeros.append(self._header.get("PZERO" + str(idx + 1), None))
parnames.append(self._header["PTYPE" + str(idx + 1)])
formats = [format] * len(parnames)
dim = [None] * len(parnames)
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
parnames.append("DATA")
bscales.append(self._header.get("BSCALE"))
bzeros.append(self._header.get("BZEROS"))
data_shape = self.shape[:-1]
formats.append(str(int(np.prod(data_shape))) + format)
dim.append(data_shape)
parnames = _unique_parnames(parnames)
self._data_field = parnames[-1]
cols = [
Column(name=name, format=fmt, bscale=bscale, bzero=bzero, dim=dim)
for name, fmt, bscale, bzero, dim in zip(
parnames, formats, bscales, bzeros, dim
)
]
coldefs = ColDefs(cols)
return coldefs
@property
def _nrows(self):
if not self._data_loaded:
# The number of 'groups' equates to the number of rows in the table
# representation of the data
return self._header.get("GCOUNT", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
return 0
@property
def is_image(self):
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
size = 0
naxis = self._header.get("NAXIS", 0)
# for random group image, NAXIS1 should be 0, so we skip NAXIS1.
if naxis > 1:
size = 1
for idx in range(1, naxis):
size = size * self._header["NAXIS" + str(idx + 1)]
bitpix = self._header["BITPIX"]
gcount = self._header.get("GCOUNT", 1)
pcount = self._header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def update_header(self):
old_naxis = self._header.get("NAXIS", 0)
if self._data_loaded:
if isinstance(self.data, GroupData):
self._axes = list(self.data.data.shape)[1:]
self._axes.reverse()
self._axes = [0] + self._axes
field0 = self.data.dtype.names[0]
field0_code = self.data.dtype.fields[field0][0].name
elif self.data is None:
self._axes = [0]
field0_code = "uint8" # For lack of a better default
else:
raise ValueError("incorrect array type")
self._header["BITPIX"] = DTYPE2BITPIX[field0_code]
self._header["NAXIS"] = len(self._axes)
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set("NAXIS" + str(idx + 1), axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if self._has_data and isinstance(self.data, GroupData):
self._header.set("GROUPS", True, after="NAXIS" + str(len(self._axes)))
self._header.set("PCOUNT", len(self.data.parnames), after="GROUPS")
self._header.set("GCOUNT", len(self.data), after="PCOUNT")
column = self.data._coldefs[self._data_field]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set("BSCALE", column.bscale)
if zero:
self._header.set("BZERO", column.bzero)
for idx, name in enumerate(self.data.parnames):
self._header.set("PTYPE" + str(idx + 1), name)
column = self.data._coldefs[idx]
scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
self._header.set("PSCAL" + str(idx + 1), column.bscale)
if zero:
self._header.set("PZERO" + str(idx + 1), column.bzero)
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
we have to get the data's byte order a different way...
TODO: Might be nice to store some indication of the data's byte order
as an attribute or function so that we don't have to do this.
"""
size = 0
if self.data is not None:
self.data._scale_back()
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
fname = self.data.dtype.names[0]
byteorder = self.data.dtype.fields[fname][0].str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify locations and values of mandatory keywords.
self.req_cards(
"NAXIS", 2, lambda v: (_is_int(v) and 1 <= v <= 999), 1, option, errs
)
self.req_cards("NAXIS1", 3, lambda v: (_is_int(v) and v == 0), 0, option, errs)
after = self._header["NAXIS"] + 3
pos = lambda x: x >= after
self.req_cards("GCOUNT", pos, _is_int, 1, option, errs)
self.req_cards("PCOUNT", pos, _is_int, 0, option, errs)
self.req_cards("GROUPS", pos, lambda v: (v is True), True, option, errs)
return errs
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
# TODO: Maybe check this on a per-field basis instead of assuming
# that all fields have the same byte order?
byteorder = self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]
if byteorder != ">":
if self.data.flags.writeable:
byteswapped = True
d = self.data.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = self.data.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
d = self.data
byte_data = d.view(type=np.ndarray, dtype=np.ubyte)
cs = self._compute_checksum(byte_data)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped:
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _summary(self):
summary = super()._summary()
name, ver, classname, length, shape, format, gcount = summary
# Drop the first axis from the shape
if shape:
shape = shape[1:]
if shape and all(shape):
# Update the format
format = self.columns[0].dtype.name
# Update the GCOUNT report
gcount = f"{self._gcount} Groups {self._pcount} Parameters"
return (name, ver, classname, length, shape, format, gcount)
def _par_indices(names):
"""
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique
def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = "_" + name
name_upper = "_" + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (
ATTRIBUTE_TO_KEYWORD,
FITS2NUMPY,
KEYWORD_NAMES,
KEYWORD_TO_ATTRIBUTE,
TDEF_RE,
ColDefs,
Column,
_AsciiColDefs,
_cmp_recformats,
_convert_format,
_FormatP,
_FormatQ,
_makep,
_parse_tformat,
_scalar_to_format,
)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num, path_like
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from .base import DELAYED, ExtensionHDU, _ValidHDU
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = " "
lineterminator = "\n"
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
# The following flag can be used by subclasses to determine whether to load
# variable length data from the heap automatically or whether the columns
# should contain the size and offset in the heap and let the subclass
# decide when to load the data from the heap. This can be used for example
# in CompImageHDU to only load data tiles that are needed.
_load_variable_length_data = True
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(
cls,
columns,
header=None,
nrows=0,
fill=False,
character_as_bytes=False,
**kwargs,
):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object), return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
An optional `Header` object to instantiate the new HDU with. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If `False`,
copy the data from the input; undefined cells will still be filled with
zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
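Examples
--------
A minimal sketch (illustrative only; the column name and values are
hypothetical, not part of the original documentation):
>>> import numpy as np
>>> from astropy.io import fits
>>> col = fits.Column(name="flux", format="E", array=np.ones(3))
>>> hdu = fits.BinTableHDU.from_columns([col])
>>> len(hdu.data)
3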
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(
coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes
)
hdu = cls(
data=data, header=header, character_as_bytes=character_as_bytes, **kwargs
)
coldefs._add_listener(hdu)
return hdu
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (
self._load_variable_length_data
and any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats)
and self._data_size is not None
and self._data_size > self._theap
):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset)
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data = raw_data[:tbsize].view(dtype=columns.dtype, type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
# and no data has been assigned to the columns, in which case just
# return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
data._load_variable_length_data = self._load_variable_length_data
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder(">")
# hack to enable pseudo-uint support
data._uint = self._uint
# pass the heap offset and size, needed for P format (variable-length) columns
data._heapoffset = self._theap
data._heapsize = self._header["PCOUNT"]
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data # noqa: B018
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError("No header to setup HDU.")
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
("XTENSION", self._extension, self._ext_comment),
("BITPIX", 8, "array data type"),
("NAXIS", 2, "number of array dimensions"),
("NAXIS1", 0, "length of dimension 1"),
("NAXIS2", 0, "length of dimension 2"),
("PCOUNT", 0, "number of group parameters"),
("GCOUNT", 1, "number of groups"),
("TFIELDS", 0, "number of table fields"),
]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. The data is still a "view" (for now).
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ", ".join(x + "n" for x in sorted(future_ignore))
warnings.warn(
"The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys),
AstropyDeprecationWarning,
)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header["NAXIS1"] = self.data._raw_itemsize
self._header["NAXIS2"] = self.data.shape[0]
self._header["TFIELDS"] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if "data" in self.__dict__:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ",".join(self.columns._recformats)
data = np.rec.array(
None, formats=formats, names=self.columns.names, shape=0
)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if "data" in self.__dict__:
self.columns._remove_listener(self.__dict__["data"])
self.__dict__["data"] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get("NAXIS2", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header["NAXIS1"] * self._header["NAXIS2"]
return self._header.get("THEAP", size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set("NAXIS1", self.data._raw_itemsize, after="NAXIS")
self._header.set("NAXIS2", self.data.shape[0], after="NAXIS1")
self._header.set("TFIELDS", len(self.columns), after="GCOUNT")
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(), header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header["TFIELDS"] = len(self.data._coldefs)
self._header["NAXIS2"] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
heapstart = self._header.get("THEAP", tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header["PCOUNT"] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat, max=_max)
self._header["TFORM" + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option="warn"):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if len(self._header) > 1:
if not (
isinstance(self._header[0], str)
and self._header[0].rstrip() == self._extension
):
err_text = "The XTENSION keyword must match the HDU type."
fix_text = f"Converted the XTENSION keyword to {self._extension}."
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
self.req_cards("NAXIS", None, lambda v: (v == 2), 2, option, errs)
self.req_cards("BITPIX", None, lambda v: (v == 8), 8, option, errs)
self.req_cards(
"TFIELDS",
7,
lambda v: (_is_int(v) and v >= 0 and v <= 999),
0,
option,
errs,
)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TFORM" + str(idx + 1), None, None, None, option, errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header["NAXIS2"]
ncols = self._header["TFIELDS"]
format = ", ".join(
[self._header["TFORM" + str(j + 1)] for j in range(ncols)]
)
format = f"[{format}]"
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(
self, column, col_idx, attr, old_value, new_value
):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(
before_keyword, (keyword, new_value), after=True
)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1 :]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword, (keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
Otherwise, keywords for all columns are cleared.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
continue
num = int(match.group("num")) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword, num))
# First delete
rev_sorted_idx_0 = sorted(
table_keywords, key=operator.itemgetter(0), reverse=True
)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value, old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if "TFIELDS" in self._header:
self._header["TFIELDS"] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "TABLE"
_ext_comment = "ASCII table extension"
_padding_byte = " "
_columns_type = _AsciiColDefs
__format_RE = re.compile(r"(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?")
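# For example (illustrative), the ASCII table format string "E15.7" matches
# with code="E", width="15", prec="7", while "I10" matches with code="I",
# width="10" and no precision group.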
def __init__(
self, data=None, header=None, name=None, ver=None, character_as_bytes=False
):
super().__init__(
data, header, name=name, ver=ver, character_as_bytes=character_as_bytes
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
names = [n for idx, n in enumerate(columns.names)]
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = "S" + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header["NAXIS1"] > itemsize:
data_type = "S" + str(
columns.spans[idx] + self._header["NAXIS1"] - itemsize
)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b" ", dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option="warn"):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards("PCOUNT", None, lambda v: (v == 0), 0, option, errs)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TBCOL" + str(idx + 1), None, _is_int, None, option, errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "BINTABLE"
_ext_comment = "binary table extension"
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
if data is not None and data is not DELAYED:
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(
data,
header,
name=name,
uint=uint,
ver=ver,
character_as_bytes=character_as_bytes,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension in (cls._extension, "A3DTABLE")
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data.
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
# contribute to the checksum)
if data._get_raw_data() is None:
# This block is still needed because
# test_variable_length_table_data leads to ._get_raw_data
# returning None which means _get_heap_data doesn't work.
# This happens when the data is loaded in memory rather than
# left unloaded on disk.
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
# write out the heap of variable length array columns; this has
# to be done after the "regular" data is written (above)
# to avoid a bug in the lustre filesystem client, don't
# write 0-byte objects
if data._gap > 0:
fileobj.write((data._gap * "\0").encode("ascii"))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless of
# whether that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx], _FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx) for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
# `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == "U":
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i + 1 :])
item = np.char.encode(item, "ascii")
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j + 1 :])
# Fix padding problem (see #5296).
padding = "\x00" * (field_width - item_length)
fileobj.write(padding.encode("ascii"))
_tdump_file_format = textwrap.dedent(
"""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
'Q' format due to difficult-to-overcome ambiguities. What this
means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
eight 16-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
"""
)
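# As a rough illustration (hypothetical column, field widths not to scale),
# a cdfile line for a single floating point column named FLUX with unit Jy
# could look like:
#     FLUX E "" Jy "" "" "" ""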
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
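Examples
--------
A minimal sketch, assuming ``hdu`` is an existing `BinTableHDU` and the
file names are illustrative:
>>> hdu.dump("table_data.txt", "table_coldefs.txt",
...          "table_header.txt", overwrite=True)  # doctest: +SKIP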
"""
if isinstance(datafile, path_like):
datafile = os.path.expanduser(datafile)
if isinstance(cdfile, path_like):
cdfile = os.path.expanduser(cdfile)
if isinstance(hfile, path_like):
hfile = os.path.expanduser(hfile)
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, path_like):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(
" ".join([f"File '{f}' already exists." for f in exist])
+ " If you mean to replace the file(s) then use the argument "
"'overwrite=True'."
)
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep="\n", endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace("\n", "\n ")
def load(cls, datafile, cdfile=None, hfile=None, replace=False, header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent, the column definitions and/or header parameters are taken
from the header object given in the ``header`` argument; otherwise, sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
values not present in this Header, unless ``replace=True`` in which
this Header's values are completely replaced with the values from
hfile.
Notes
-----
The primary use for the `load` method is to allow input of the table
data and parameters from ASCII files that were edited in a standard
text editor. The `dump` method can be used to create the initial ASCII
files.
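Examples
--------
A minimal sketch using files produced by `dump` (file names are
illustrative):
>>> hdu = BinTableHDU.load("table_data.txt", "table_coldefs.txt",
...                        "table_header.txt")  # doctest: +SKIP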
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(
Header.fromtextfile(hfile), update=True, update_first=True
)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace("\n", "\n ")
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + ".txt"
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == "S":
itemsize = int(format[1:])
return "{:{size}}".format(val, size=itemsize)
elif format in np.typecodes["AllInteger"]:
# output integer
return f"{val:21d}"
elif format in np.typecodes["Complex"]:
return f"{val.real:21.15g}+{val.imag:.15g}j"
elif format in np.typecodes["Float"]:
# output floating point
return f"{val:#21.15g}"
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append("VLA_Length=")
line.append(f"{len(row[column.name]):21d}")
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == "V":
array_format = dtype.base.char
if array_format == "S":
array_format += str(dtype.itemsize)
if dtype.char == "V":
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name], array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ["disp", "unit", "dim", "null", "bscale", "bzero"]
line += [
"{!s:16s}".format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)
]
fileobj.write(" ".join(line))
fileobj.write("\n")
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == "VLA_Length=":
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(
np.recarray(shape=1, dtype=dtype), nrows=nrows, fill=True
)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)) :]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY["L"]:
return bool(int(val))
elif recformats[col] == FITS2NUMPY["M"]:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == "VLA_Length=":
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx : idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [
format_value(col, val) for val in line[slice_]
]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ["name", "format", "disp", "unit", "dim"]:
kwargs[key] = words.pop(0).replace('""', "")
for key in ["null", "bscale", "bzero"]:
word = words.pop(0).replace('""', "")
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (
not isinstance(c, chararray.chararray)
and c.itemsize > 1
and c.dtype.str[0] in swap_types
):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({"names": names, "formats": formats, "offsets": offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import mmap
import sys
import warnings
import numpy as np
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_is_dask_array,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU, _ValidHDU
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
"SIMPLE": "conforms to FITS standard",
"XTENSION": "Image extension",
"BITPIX": "array data type",
"NAXIS": "number of array dimensions",
"GROUPS": "has groups",
"PCOUNT": "number of parameters",
"GCOUNT": "number of groups",
}
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
uint=True,
scale_back=False,
ignore_blank=False,
**kwargs,
):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError("No header to setup HDU.")
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ("XTENSION", "IMAGE", self.standard_keyword_comments["XTENSION"])
else:
c0 = ("SIMPLE", True, self.standard_keyword_comments["SIMPLE"])
cards = [
c0,
("BITPIX", 8, self.standard_keyword_comments["BITPIX"]),
("NAXIS", 0, self.standard_keyword_comments["NAXIS"]),
]
if isinstance(self, GroupsHDU):
cards.append(("GROUPS", True, self.standard_keyword_comments["GROUPS"]))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(("PCOUNT", 0, self.standard_keyword_comments["PCOUNT"]))
cards.append(("GCOUNT", 1, self.standard_keyword_comments["GCOUNT"]))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = "BZERO" in self._header
bscale_in_header = "BSCALE" in self._header
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [
self._header.get("NAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("NAXIS", 0))
]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get("BITPIX")
self._gcount = self._header.get("GCOUNT", 1)
self._pcount = self._header.get("PCOUNT", 0)
self._blank = None if ignore_blank else self._header.get("BLANK")
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get("BLANK")
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if "name" in kwargs and kwargs["name"]:
self.name = kwargs["name"]
if "ver" in kwargs and kwargs["ver"]:
self.ver = kwargs["ver"]
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if not do_not_scale_image_data and (self._bscale != 1 or self._bzero != 0):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get("BITPIX")
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get("BZERO")
self._bscale = self._header.get("BSCALE")
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
return the appropriate slice of the data, and load *only* that section
into memory.
Sections are useful for retrieving a small subset of data from a remote
file that has been opened with the ``use_fsspec=True`` parameter.
For example, you can use this feature to download a small cutout from
a large FITS image hosted in the Amazon S3 cloud (see the
:ref:`astropy:fits-cloud-files` section of the Astropy
documentation for more details.)
For local files, sections are mostly obsoleted by memmap support, but
should still be used to deal with very large scaled images.
Note that sections cannot currently be written to. Moreover, any
in-memory updates to the image's ``.data`` property may not be
reflected in the slices obtained via ``.section``. See the
:ref:`astropy:data-sections` section of the documentation for
more details.
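Examples
--------
A minimal sketch (the file name and slice bounds are illustrative):
>>> from astropy.io import fits  # doctest: +SKIP
>>> with fits.open("large_image.fits") as hdul:  # doctest: +SKIP
...     cutout = hdul[0].section[100:200, 250:350]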
"""
return Section(self)
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
Please remember that the order of axes on a Numpy array is opposite
to the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
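For example (illustrative values), an image written with ``NAXIS1 = 300``
and ``NAXIS2 = 200`` is returned as an array with ``data.shape == (200, 300)``.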
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if "data" in self.__dict__ and self.__dict__["data"] is not None:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_integer(self.__dict__["data"].dtype)
else:
self._data_replaced = True
was_unsigned = False
if data is not None:
if not isinstance(data, np.ndarray) and not _is_dask_array(data):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception: # pragma: no cover
raise TypeError(
f"data object {data!r} could not be coerced into an " f"ndarray"
)
if data.shape == ():
raise TypeError(
f"data object {data!r} should have at least one dimension"
)
self.__dict__["data"] = data
self._modified = True
if data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if data is not None and was_unsigned:
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
# will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (
self._modified
or self._header._modified
or (self._has_data and self.shape != self.data.shape)
):
# Not likely that anything needs updating
return
old_naxis = self._header.get("NAXIS", 0)
if "BITPIX" not in self._header:
bitpix_comment = self.standard_keyword_comments["BITPIX"]
else:
bitpix_comment = self._header.comments["BITPIX"]
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set("BITPIX", self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if "NAXIS" in self._header:
naxis_comment = self._header.comments["NAXIS"]
else:
naxis_comment = self.standard_keyword_comments["NAXIS"]
self._header.set("NAXIS", len(self._axes), naxis_comment, after="BITPIX")
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = "NAXIS" + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if "BLANK" in self._header:
self._blank = self._header["BLANK"]
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_pseudo_int_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1
):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (
dtype is not None
and dtype.kind == "u"
and (self._scale_back or self._scale_back is None)
):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ["BSCALE", "BZERO"]:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header["BITPIX"]
self._blank = self._header.pop("BLANK", None)
def scale(self, type=None, option="old", bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Calling this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
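Examples
--------
An illustrative sketch only; the HDU instance ``hdu`` and the values
shown below are hypothetical::
    hdu.scale('int16', option='minmax')  # rescale using the data min/max
    hdu.scale('int16', bzero=32768)      # force a specific BZERO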
"""
# Disable blank support for now
self._scale_internal(
type=type, option=option, bscale=bscale, bzero=bzero, blank=None
)
def _scale_internal(
self, type=None, option="old", bscale=None, bzero=None, blank=0
):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (
option == "old"
and self._orig_bscale is not None
and self._orig_bzero is not None
):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax" and not issubclass(_type, np.floating):
if _is_dask_array(self.data):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0**8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0**nbytes - 2)
else:
_scale = 1
_zero = 0
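# Worked example (illustrative values only): scaling to int16 with
# option='minmax' and data spanning [-100, 900] gives
#   _zero  = (900 + -100) / 2           = 400.0
#   _scale = (900 - -100) / (2**16 - 2) ~= 0.0153
# so the stored integers roughly span the full signed 16-bit range.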
# Do the scaling
if _zero != 0:
if _is_dask_array(self.data):
self.data = self.data - _zero
else:
# Added in 0.9.6.3 to avoid an out-of-range error for BZERO = +32768.
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting="unsafe")
self._header["BZERO"] = _zero
else:
try:
del self._header["BZERO"]
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header["BSCALE"] = _scale
else:
try:
del self._header["BSCALE"]
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header["BLANK"] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._blank = blank
self._header["BITPIX"] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option="warn"):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
# TODO: Once the FITSSchema framework is merged these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank)
)
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU."
)
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(
BITPIX2DTYPE[self._orig_bitpix], blank=self._orig_blank
)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif _is_dask_array(self.data):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(
M.newbyteorder, "S"
)
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b"\0")
fileobj.flush()
if fileobj.fileobj_mode not in ("rb+", "wb+", "ab+"):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode="rb+")
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(
fp.fileno(), length=initial_position + n_bytes, access=mmap.ACCESS_WRITE
)
outarr = np.ndarray(
shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap,
)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
if bitpix == 8 and self._orig_bzero == -128:
return np.dtype("int8")
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _convert_pseudo_integer(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
In this case, we don't need to handle BLANK by converting it to NaN,
since NaNs cannot be represented with integers anyway; the user is
responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == "u":
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
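# Worked example (illustrative): for BITPIX=16 data stored with
# BZERO=32768 and BSCALE=1, a raw int16 value of -32768 is first cast
# to uint16 (giving 32768) and then reduced by 1 << 15, yielding 0 --
# the same physical value as raw * BSCALE + BZERO = -32768 + 32768.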
def _get_scaled_image_data(self, offset, shape):
"""
Internal function for reading image data from a file and applying scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder(">")
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and self._blank is None
):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError(
"Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False."
)
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_integer(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ""
else:
format = self.data.dtype.name
format = format[format.rfind(".") + 1 :]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ""
if (
format
and not self._do_not_scale_image_data
and (self._orig_bscale != 1 or self._orig_bzero != 0)
):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f" (rescales to {new_dtype.name})"
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, "")
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_integer(self.data.dtype):
d = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"i{self.data.dtype.itemsize}",
)
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != ">":
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_integer(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
class Section:
"""
Class enabling subsets of ImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
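A minimal usage sketch (``example.fits`` is a hypothetical file)::
    with fits.open('example.fits') as hdul:
        cutout = hdul[1].section[100:200, 300:400]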
"""
def __init__(self, hdu):
self.hdu = hdu
@property
def shape(self):
# Implementing `.shape` enables `astropy.nddata.Cutout2D` to accept
# `ImageHDU.section` in place of `.data`.
return self.hdu.shape
def __getitem__(self, key):
"""Returns a slice of HDU data specified by `key`.
If the image HDU is backed by a file handle, this method will only read
the chunks of the file needed to extract `key`, which is useful in
situations where the file is located on a slow or remote file system
(e.g., cloud storage).
"""
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += (Ellipsis,)
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError("too many indices for array")
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx + 1 :]
return_0dim = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
# Note: the actual file read operations are delegated to
# `util._array_from_file` via `ImageHDU._get_scaled_image_data`
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1 :]] for k in ks]
if any(isinstance(key, slice) or isiterable(key) for key in keys[idx + 1 :]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = "PRIMARY"
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
ignore_blank=False,
uint=True,
scale_back=None,
):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
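Examples
--------
A minimal, illustrative construction (the data array is hypothetical)::
    hdu = fits.PrimaryHDU(data=np.zeros((10, 10), dtype=np.float32))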
"""
super().__init__(
data=data,
header=header,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back,
)
# insert the keywords EXTEND
if header is None:
dim = self._header["NAXIS"]
if dim == 0:
dim = ""
self._header.set("EXTEND", True, after="NAXIS" + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (
card.keyword == "SIMPLE"
and ("GROUPS" not in header or header["GROUPS"] != True) # noqa: E712
and card.value
)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword, check that its position is correct.
if "EXTEND" in self._header:
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"EXTEND", naxis + 3, lambda v: isinstance(v, bool), True, option, errs
)
return errs
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = "IMAGE"
def __init__(
self,
data=None,
header=None,
name=None,
do_not_scale_image_data=False,
uint=True,
scale_back=None,
ver=None,
):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
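Examples
--------
An illustrative sketch (the data array and name are hypothetical)::
    hdu = fits.ImageHDU(data=np.arange(100).reshape(10, 10), name='SCI')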
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data,
header=header,
name=name,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
scale_back=scale_back,
ver=ver,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _verify(self, option="warn"):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get("NAXIS", 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v == 0), 0, option, errs
)
return errs
class _IndexInfo:
def __init__(self, indx, naxis):
if _is_int(indx):
if indx < 0: # support negative indexing
indx = indx + naxis
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f"Index {indx} out of range.")
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f"Illegal index {indx}")
|
f6fa0f2d50855d0925858cdf00d939c053775e4b679bcbe5b17ab80793428dce | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits._tiled_compression import (
compress_image_data,
decompress_image_data_section,
)
from astropy.io.fits._tiled_compression.utils import _data_shape, _n_tiles, _tile_shape
from astropy.io.fits.card import Card
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.column import TDEF_RE, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.utils import lazyproperty
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.shapes import simplify_basic_index
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU
from .image import ImageHDU
from .table import BinTableHDU
# This global variable is used e.g., when calling fits.open with
# disable_image_compression which temporarily changes the global variable to
# False. This should ideally be refactored to avoid relying on global module
# variables.
COMPRESSION_ENABLED = True
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: "NO_DITHER",
SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = (
"NOCOMPRESS",
"RICE_1",
"GZIP_1",
"GZIP_2",
"PLIO_1",
"HCOMPRESS_1",
)
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}
COMPRESSION_KEYWORDS = {
"ZIMAGE",
"ZCMPTYPE",
"ZBITPIX",
"ZNAXIS",
"ZMASKCMP",
"ZSIMPLE",
"ZTENSION",
"ZEXTEND",
}
def _validate_tile_shape(*, tile_shape, compression_type, image_header):
naxis = image_header["NAXIS"]
if not tile_shape:
tile_shape = []
elif len(tile_shape) != naxis:
warnings.warn(
"Provided tile size not appropriate for the data. "
"Default tile size will be used.",
AstropyUserWarning,
)
tile_shape = []
else:
tile_shape = list(tile_shape)
# Set default tile dimensions for HCOMPRESS_1
if compression_type == "HCOMPRESS_1":
if image_header["NAXIS1"] < 4 or image_header["NAXIS2"] < 4:
raise ValueError("Hcompress minimum image dimension is 4 pixels")
elif tile_shape:
if tile_shape[-1] < 4 or tile_shape[-2] < 4:
# user specified tile size is too small
raise ValueError("Hcompress minimum tile dimension is 4 pixels")
major_dims = len([ts for ts in tile_shape if ts > 1])
if major_dims > 2:
raise ValueError(
"HCOMPRESS can only support 2-dimensional tile sizes. "
"All but two of the tile_shape dimensions must be set "
"to 1."
)
if tile_shape and (tile_shape[-1] == 0 and tile_shape[-2] == 0):
# compress the whole image as a single tile
tile_shape[-1] = image_header["NAXIS1"]
tile_shape[-2] = image_header["NAXIS2"]
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_shape[i] = 1
elif not tile_shape:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
# efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_shape = [image_header["NAXIS1"]]
if image_header["NAXIS2"] <= 30:
tile_shape.insert(0, image_header["NAXIS1"])
else:
# look for another good tile dimension
naxis2 = image_header["NAXIS2"]
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_shape.insert(0, dim)
break
else:
tile_shape.insert(0, 17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_shape.insert(0, 1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = image_header["NAXIS1"] % tile_shape[-1] # 1st dimen
original_tile_shape = tile_shape[:]
if remain > 0 and remain < 4:
tile_shape[-1] += 1 # try increasing tile size by 1
remain = image_header["NAXIS1"] % tile_shape[-1]
if remain > 0 and remain < 4:
raise ValueError("Last tile along 1st dimension has less than 4 pixels")
remain = image_header["NAXIS2"] % tile_shape[-2] # 2nd dimen
if remain > 0 and remain < 4:
tile_shape[-2] += 1 # try increasing tile size by 1
remain = image_header["NAXIS2"] % tile_shape[-2]
if remain > 0 and remain < 4:
raise ValueError("Last tile along 2nd dimension has less than 4 pixels")
if tile_shape != original_tile_shape:
warnings.warn(
f"The tile shape should be such that no tiles have "
f"fewer than 4 pixels. The tile shape has "
f"automatically been changed from {original_tile_shape} "
f"to {tile_shape}, but in future this will raise an "
f"error and the correct tile shape should be specified "
f"directly.",
AstropyDeprecationWarning,
)
if len(tile_shape) == 0 and image_header["NAXIS"] > 0:
tile_shape = [1] * (naxis - 1) + [image_header["NAXIS1"]]
return tuple(tile_shape)
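# Illustrative defaults produced by _validate_tile_shape (for a
# hypothetical 300 x 300 image, i.e. NAXIS1 = NAXIS2 = 300):
#   RICE_1 / GZIP_1 with no tile_shape  -> (1, 300)   (one row per tile)
#   HCOMPRESS_1 with no tile_shape      -> (16, 300)  (16 rows per tile)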
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
"SIMPLE": "ZSIMPLE",
"XTENSION": "ZTENSION",
"BITPIX": "ZBITPIX",
"NAXIS": "ZNAXIS",
"EXTEND": "ZEXTEND",
"BLOCKED": "ZBLOCKED",
"PCOUNT": "ZPCOUNT",
"GCOUNT": "ZGCOUNT",
"CHECKSUM": "ZHECKSUM",
"DATASUM": "ZDATASUM",
}
_zdef_re = re.compile(r"(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?")
_compression_keywords = set(_keyword_remaps.values()).union(
["ZIMAGE", "ZCMPTYPE", "ZMASKCMP", "ZQUANTIZ", "ZDITHER0"]
)
_indexed_compression_keywords = {"ZNAXIS", "ZTILE", "ZNAME", "ZVAL"}
# TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override and Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith(
"HIERARCH "
):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__deltitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping, in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False, after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after, replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(
card, before=remapped_before, after=remapped_after, replace=replace
)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = (
"Keyword {!r} is reserved for use by the FITS Tiled Image "
"Convention and will not be stored in the header for the "
"image being compressed.".format(keyword)
)
if keyword == "TFIELDS":
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group("label").upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group("label").upper()
num = m.group("num")
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
is_naxisn = False
if keyword[:5] == "NAXIS":
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f"ZNAXIS{index}"
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
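# Illustrative remappings (see _keyword_remaps above):
#   'BITPIX'  -> 'ZBITPIX'
#   'NAXIS2'  -> 'ZNAXIS2'
#   'EXTNAME' -> 'EXTNAME'  (not remapped; returned unchanged)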
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword, repeat))
return idx
def clear(self):
"""
Remove all cards from the header.
"""
self._table_header.clear()
super().clear()
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_load_variable_length_data = False
"""
We don't want to always load all the tiles so by setting this option
we can then access the tiles as needed.
"""
_default_name = "COMPRESSED_IMAGE"
@deprecated_renamed_argument(
"tile_size",
None,
since="5.3",
message="The tile_size argument has been deprecated. Use tile_shape "
"instead, but note that this should be given in the reverse "
"order to tile_size (tile_shape should be in Numpy C order).",
)
def __init__(
self,
data=None,
header=None,
name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_shape=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False,
scale_back=False,
tile_size=None,
):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``, ``'NOCOMPRESS'``
tile_shape : tuple, optional
Compression tile shape, which should be specified using the default
Numpy convention for array shapes (C order). The default is to
treat each row of image as a tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. At this time, Astropy does not
support the ability to extract and uncompress sections of the
image without having to uncompress the entire image.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_shape`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_shape`` value of ``(100,100)`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values, however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
the negative of desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` means that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The first method, specified with the constant
``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
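Examples
--------
An illustrative sketch (``image_data`` is a hypothetical float array)::
    hdu = fits.CompImageHDU(data=image_data, compression_type='RICE_1',
                            tile_shape=(64, 64), quantize_level=16.0,
                            quantize_method=SUBTRACTIVE_DITHER_1,
                            dither_seed=DITHER_SEED_CHECKSUM)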
"""
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
if tile_shape is None and tile_size is not None:
tile_shape = tuple(tile_size[::-1])
elif tile_shape is not None and tile_size is not None:
raise ValueError(
"Cannot specify both tile_size and tile_shape. "
"Note that tile_size is deprecated and tile_shape "
"alone should be used."
)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(
header,
name,
compression_type=compression_type,
tile_shape=tile_shape,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed,
)
# TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [
self._header.get("ZNAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("ZNAXIS", 0))
]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._bitpix = self._header["ZBITPIX"]
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (e.g., from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if "EXTNAME" in header:
indices = header._keyword_indices["EXTNAME"]
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [
index for index in indices if header[index] == self._default_name
]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self.header:
self.header["EXTNAME"] = value
else:
self.header["EXTNAME"] = (value, "extension name")
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != "XTENSION":
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ("BINTABLE", "A3DTABLE"):
return False
if "ZIMAGE" not in header or not header["ZIMAGE"]:
return False
return COMPRESSION_ENABLED
def _update_header_data(
self,
image_header,
name=None,
compression_type=None,
tile_shape=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None,
):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1', 'NOCOMPRESS'; if this value is `None`, use value
already in the header; if no value already in the header, use
'RICE_1'
tile_shape : tuple of int, optional
compression tile shape (in C order); if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 1
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
            range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
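        Examples
        --------
        A minimal sketch of the "assign new data of a different type" use case
        described above (array shapes and dtypes are illustrative only)::

            import numpy as np
            from astropy.io import fits

            hdu = fits.CompImageHDU(data=np.zeros((64, 64), dtype=np.int16))
            # Assign data of a different type, then resynchronize the
            # compressed table header with it.
            hdu.data = np.zeros((64, 64), dtype=np.float32)
            hdu._update_header_data(hdu.header)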
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
        # 4 GB. This is not a perfect heuristic, as one can contrive an input
        # array for which the binary table representing the compressed data
        # exceeds 4 GB even though the uncompressed data does not. That said,
        # this is the same heuristic used by CFITSIO, so it should give
        # consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
if self._has_data:
huge_hdu = self.data.nbytes > 2**32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and "EXTNAME" not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set(
"EXTNAME",
self._default_name,
"name of this binary table extension",
after="TFIELDS",
)
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
"Unknown compression type provided (supported are {}). "
"Default ({}) compression will be used.".format(
", ".join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE,
),
AstropyUserWarning,
)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set(
"ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS"
)
else:
compression_type = self.compression_type
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get("BZERO", 0.0)
bscale = image_header.get("BSCALE", 1.0)
after_keyword = "EXTNAME"
if bscale != 1.0:
self._header.set("BSCALE", bscale, after=after_keyword)
after_keyword = "BSCALE"
if bzero != 0.0:
self._header.set("BZERO", bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments["BITPIX"]
except (AttributeError, KeyError):
bitpix_comment = "data type of original image"
try:
naxis_comment = image_header.comments["NAXIS"]
except (AttributeError, KeyError):
naxis_comment = "dimension of original image"
# Set the label for the first column in the table
self._header.set(
"TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS"
)
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == "PLIO_1":
tform1 = "1QI" if huge_hdu else "1PI"
else:
tform1 = "1QB" if huge_hdu else "1PB"
self._header.set(
"TFORM1",
tform1,
"data format of field: variable length array",
after="TTYPE1",
)
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header["TTYPE1"], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header["BITPIX"]
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
            # CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
            # column to store floating point data that couldn't be quantized,
            # instead of the UNCOMPRESSED_DATA column. There's no way to
            # control this behavior, so the only way to determine which
            # behavior will be employed is via the CFITSIO version.
ttype2 = "GZIP_COMPRESSED_DATA"
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = "1QB" if huge_hdu else "1PB"
# Set up the second column for the table that will hold any
# uncompressable data.
self._header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1")
self._header.set(
"TFORM2",
tform2,
"data format of field: variable length array",
after="TTYPE2",
)
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2")
self._header.set(
"TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3"
)
col3 = Column(name=self._header["TTYPE3"], format=self._header["TFORM3"])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3")
self._header.set(
"TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4"
)
after = "TFORM4"
col4 = Column(name=self._header["TTYPE4"], format=self._header["TFORM4"])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = "TFORM1"
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ["TTYPE2", "TFORM2", "TTYPE3", "TFORM3", "TTYPE4", "TFORM4"]
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes")
self._header.set(
"TFIELDS", ncols, "number of fields in each row", after="GCOUNT"
)
self._header.set(
"ZIMAGE", True, "extension contains compressed image", after=after
)
self._header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE")
self._header.set(
"ZNAXIS", self._image_header["NAXIS"], naxis_comment, after="ZBITPIX"
)
# Strip the table header of all the ZNAZISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header["ZNAXIS" + str(idx)]
del self._header["ZTILE" + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
tile_shape = _validate_tile_shape(
tile_shape=tile_shape,
compression_type=compression_type,
image_header=self._image_header,
)
# Set up locations for writing the next cards in the header.
last_znaxis = "ZNAXIS"
if self._image_header["NAXIS"] > 0:
after1 = "ZNAXIS1"
else:
after1 = "ZNAXIS"
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 0
for idx, axis in enumerate(self._axes):
naxis = "NAXIS" + str(idx + 1)
znaxis = "ZNAXIS" + str(idx + 1)
ztile = "ZTILE" + str(idx + 1)
ts = tile_shape[len(self._axes) - 1 - idx]
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= (axis - 1) // ts + 1
if image_header and naxis in image_header:
self._header.set(
znaxis, axis, image_header.comments[naxis], after=last_znaxis
)
else:
self._header.set(
znaxis, axis, "length of original image axis", after=last_znaxis
)
self._header.set(ztile, ts, "size of tiles to be compressed", after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set("NAXIS2", nrows, "number of rows in table")
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
if self._header[zname] == "NOISEBIT":
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == "SCALE ":
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == "SMOOTH ":
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
            hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = "ZCMPTYPE"
idx = 1
if compression_type == "RICE_1":
self._header.set(
"ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword
)
self._header.set(
"ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1"
)
self._header.set(
"ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1"
)
if self._header["ZBITPIX"] == 8:
bytepix = 1
elif self._header["ZBITPIX"] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set(
"ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
elif compression_type == "HCOMPRESS_1":
self._header.set(
"ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword
)
self._header.set(
"ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1"
)
self._header.set(
"ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1"
)
self._header.set(
"ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
if self._image_header["BITPIX"] < 0: # floating point image
self._header.set(
"ZNAME" + str(idx),
"NOISEBIT",
"floating point quantization level",
after=after_keyword,
)
self._header.set(
"ZVAL" + str(idx),
quantize_level,
"floating point quantization level",
after="ZNAME" + str(idx),
)
# Add the dither method and seed
if quantize_method:
if quantize_method not in [
NO_DITHER,
SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2,
]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn(
"Unknown quantization method provided. "
"Default method ({}) used.".format(name)
)
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = "No dithering during quantization"
else:
zquantiz_comment = "Pixel Quantization Algorithm"
self._header.set(
"ZQUANTIZ",
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after="ZVAL" + str(idx),
)
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get("ZQUANTIZ", NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if "ZDITHER0" in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header["ZDITHER0"]
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif "ZDITHER0" in self._header:
dither_seed = self._header["ZDITHER0"]
else:
dither_seed = self._generate_dither_seed(DEFAULT_DITHER_SEED)
self._header.set(
"ZDITHER0",
dither_seed,
"dithering offset when quantizing floats",
after="ZQUANTIZ",
)
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if "SIMPLE" in image_header:
self._header.set(
"ZSIMPLE",
image_header["SIMPLE"],
image_header.comments["SIMPLE"],
before="ZBITPIX",
)
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if "EXTEND" in image_header:
self._header.set(
"ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"]
)
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if "BLOCKED" in image_header:
self._header.set(
"ZBLOCKED",
image_header["BLOCKED"],
image_header.comments["BLOCKED"],
)
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in image_header:
self._header.set(
"ZTENSION",
"IMAGE",
image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in image_header:
self._header.set(
"ZPCOUNT",
image_header["PCOUNT"],
image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in image_header:
self._header.set(
"ZGCOUNT",
image_header["GCOUNT"],
image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
            # Move CHECKSUM and DATASUM cards from the image header to the
            # table header as ZHECKSUM and ZDATASUM cards.
if "CHECKSUM" in image_header:
self._header.set(
"ZHECKSUM",
image_header["CHECKSUM"],
image_header.comments["CHECKSUM"],
)
if "DATASUM" in image_header:
self._header.set(
"ZDATASUM",
image_header["DATASUM"],
image_header.comments["DATASUM"],
)
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in self._image_header:
self._header.set(
"ZTENSION",
"IMAGE",
self._image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in self._image_header:
self._header.set(
"ZPCOUNT",
self._image_header["PCOUNT"],
self._image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in self._image_header:
self._header.set(
"ZGCOUNT",
self._image_header["GCOUNT"],
self._image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if "ZHECKSUM" in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
def _scale_data(self, data):
if self._orig_bzero != 0 or self._orig_bscale != 1:
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
if "BLANK" in self._header:
blanks = data == np.array(self._header["BLANK"], dtype="int32")
else:
blanks = None
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._orig_bzero, out=data, casting="unsafe")
if blanks is not None:
data = np.where(blanks, np.nan, data)
return data
@lazyproperty
def data(self):
"""
The decompressed data array.
Note that accessing this will cause all the tiles to be loaded,
decompressed, and combined into a single data array. If you do
not need to access the whole array, consider instead using the
:attr:`~astropy.io.fits.CompImageHDU.section` property.
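        For example, a small cutout can be read without decompressing the whole
        image (the file name is illustrative, and the compressed image is
        assumed to be in extension 1)::

            from astropy.io import fits

            with fits.open("large_compressed.fits") as hdul:
                cutout = hdul[1].section[:256, :256]  # loads only the needed tiles
                full = hdul[1].data                   # decompresses everything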
"""
if len(self.compressed_data) == 0:
return None
# Since .section has general code to load any arbitrary part of the
# data, we can just use this - and the @lazyproperty on the current
# property will ensure that we do this only once.
data = self.section[...]
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (
not isinstance(data, np.ndarray) or data.dtype.fields is not None
):
raise TypeError(
"CompImageHDU data has incorrect type:{}; dtype.fields = {}".format(
type(data), data.dtype.fields
)
)
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__["data"]
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if "compressed_data" in self.__dict__:
del self.__dict__["compressed_data"]._coldefs
# Now go ahead and delete from self.__dict__; normally
        # lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__["compressed_data"]
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, "_image_header"):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
        # (Note: a set is used here instead of a list in case there are any
        # duplicate keywords, which there may be in some pathological cases:
        # https://github.com/astropy/astropy/issues/2750)
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
hcomments = self._header.comments
if "ZSIMPLE" in self._header:
image_header.set(
"SIMPLE", self._header["ZSIMPLE"], hcomments["ZSIMPLE"], before=0
)
del image_header["XTENSION"]
elif "ZTENSION" in self._header:
if self._header["ZTENSION"] != "IMAGE":
warnings.warn(
"ZTENSION keyword in compressed extension != 'IMAGE'",
AstropyUserWarning,
)
image_header.set("XTENSION", "IMAGE", hcomments["ZTENSION"], before=0)
else:
image_header.set("XTENSION", "IMAGE", before=0)
image_header.set(
"BITPIX", self._header["ZBITPIX"], hcomments["ZBITPIX"], before=1
)
image_header.set("NAXIS", self._header["ZNAXIS"], hcomments["ZNAXIS"], before=2)
last_naxis = "NAXIS"
for idx in range(image_header["NAXIS"]):
znaxis = "ZNAXIS" + str(idx + 1)
naxis = znaxis[1:]
image_header.set(
naxis, self._header[znaxis], hcomments[znaxis], after=last_naxis
)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header["NAXIS"]
for keyword in list(image_header["NAXIS?*"]):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if "ZPCOUNT" in self._header:
image_header.set(
"PCOUNT",
self._header["ZPCOUNT"],
hcomments["ZPCOUNT"],
after=last_naxis,
)
else:
image_header.set("PCOUNT", 0, after=last_naxis)
if "ZGCOUNT" in self._header:
image_header.set(
"GCOUNT", self._header["ZGCOUNT"], hcomments["ZGCOUNT"], after="PCOUNT"
)
else:
image_header.set("GCOUNT", 1, after="PCOUNT")
if "ZEXTEND" in self._header:
image_header.set("EXTEND", self._header["ZEXTEND"], hcomments["ZEXTEND"])
if "ZBLOCKED" in self._header:
image_header.set("BLOCKED", self._header["ZBLOCKED"], hcomments["ZBLOCKED"])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if "ZHECKSUM" in self._header:
image_header.set(
"CHECKSUM", self._header["ZHECKSUM"], hcomments["ZHECKSUM"]
)
if "ZDATASUM" in self._header:
image_header.set("DATASUM", self._header["ZDATASUM"], hcomments["ZDATASUM"])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if "EXTNAME" in image_header and image_header["EXTNAME"] == self._default_name:
del image_header["EXTNAME"]
# Remove the PCOUNT GCOUNT cards if the uncompressed header is
# from a primary HDU
if "SIMPLE" in image_header:
del image_header["PCOUNT"]
del image_header["GCOUNT"]
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ""
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind(".") + 1 :]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header["NAXIS"]):
_shape += (self.header["NAXIS" + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header["BITPIX"]]
return (self.name, self.ver, class_name, len(self.header), _shape, _format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"=i{self.data.dtype.itemsize}",
)
try:
nrows = self._header["NAXIS2"]
tbsize = self._header["NAXIS1"] * nrows
self._header["PCOUNT"] = 0
if "THEAP" in self._header:
del self._header["THEAP"]
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Compress the data.
# compress_image_data returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compress_image_data(
self.data, self.compression_type, self._header, self.columns
)
finally:
self.data = old_data
table_len = len(self.compressed_data) - heapsize
if table_len != self._theap:
raise Exception(
f"Unexpected compressed table size (expected {self._theap}, got {table_len})"
)
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder(">")
buf = self.compressed_data
compressed_data = buf[: self._theap].view(dtype=dtype, type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option="old", bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
            destination data type; use a string representing a numpy dtype
            name (e.g. ``'uint8'``, ``'int16'``, ``'float32'``, etc.). If
            `None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
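        Examples
        --------
        A sketch of scaling to 16-bit integers just before writing, assuming
        ``hdu`` is an existing `CompImageHDU` (the output file name is
        illustrative)::

            hdu.scale("int16", bzero=32768)
            hdu.writeto("scaled.fits", overwrite=True)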
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale != 1 or bzero != 0:
_scale = bscale
_zero = bzero
else:
if option == "old":
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax":
                if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2.0**8 - 1)
else:
_zero = (_max + _min) / 2.0
# throw away -2^N
                        _scale = (_max - _min) / (2.0 ** (8 * _type().itemsize) - 2)
# Do the scaling
if _zero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data -= _zero, and we
# do this instead of self.data = self.data - _zero to
# avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting="unsafe")
self.header["BZERO"] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header["BZERO"]
if _scale != 1:
self.data /= _scale
self.header["BSCALE"] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header["BSCALE"]
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get("BZERO", 0)
self._bscale = self.header.get("BSCALE", 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header["BITPIX"] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if "CHECKSUM" in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set(
"CHECKSUM",
image_hdu.header["CHECKSUM"],
image_hdu.header.comments["CHECKSUM"],
)
if "DATASUM" in image_hdu.header:
self._image_header.set(
"DATASUM",
image_hdu.header["DATASUM"],
image_hdu.header.comments["DATASUM"],
)
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__["data"] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, "_imagedata"):
self.__dict__["data"] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (
closed
and self._data_loaded
and _get_array_mmap(self.compressed_data) is not None
):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _update_header_scale_info(self, dtype=None):
if not self._do_not_scale_image_data and not (
self._orig_bzero == 0 and self._orig_bscale == 1
):
for keyword in ["BSCALE", "BZERO"]:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header["BITPIX"]
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed)
)
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
tile_dims = self.tile_shape
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype="uint8").sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return (
(sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000
) + 1
else:
return seed
@property
def section(self):
"""
Efficiently access a section of the image array
This property can be used to access a section of the data without
loading and decompressing the entire array into memory.
The :class:`~astropy.io.fits.CompImageSection` object returned by this
attribute is not meant to be used directly by itself. Rather, slices of
the section return the appropriate slice of the data, and loads *only*
that section into memory. Any valid basic Numpy index can be used to
slice :class:`~astropy.io.fits.CompImageSection`.
Note that accessing data using :attr:`CompImageHDU.section` will always
load tiles one at a time from disk, and therefore when accessing a large
fraction of the data (or slicing it in a way that would cause most tiles
to be loaded) you may obtain better performance by using
:attr:`CompImageHDU.data`.
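        For example, assuming an existing file (name illustrative) whose
        compressed image is in extension 1::

            from astropy.io import fits

            with fits.open("compressed_image.fits") as hdul:
                row = hdul[1].section[10, :]      # a single image row
                sub = hdul[1].section[::4, ::4]   # every fourth pixel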
"""
return CompImageSection(self)
@property
def tile_shape(self):
"""
The tile shape used for the tiled compression.
This shape is given in Numpy/C order
"""
return tuple(
[
self._header[f"ZTILE{idx + 1}"]
for idx in range(self._header["ZNAXIS"] - 1, -1, -1)
]
)
@property
def compression_type(self):
"""
The name of the compression algorithm.
"""
return self._header.get("ZCMPTYPE", DEFAULT_COMPRESSION_TYPE)
class CompImageSection:
"""
Class enabling subsets of CompImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and applies any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
self._data_shape = _data_shape(self.hdu._header)
self._tile_shape = _tile_shape(self.hdu._header)
self._n_dim = len(self._data_shape)
self._n_tiles = np.array(
_n_tiles(self._data_shape, self._tile_shape), dtype=int
)
@property
def shape(self):
return tuple(self._data_shape)
@property
def ndim(self):
return self.hdu._header["ZNAXIS"]
@property
def dtype(self):
return BITPIX2DTYPE[self.hdu._header["ZBITPIX"]]
def __getitem__(self, index):
# Shortcut if the whole data is requested (this is used by the
# data property, so we optimize it as it is frequently used)
if index is Ellipsis:
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = self._n_tiles - 1
data = decompress_image_data_section(
self.hdu.compressed_data,
self.hdu.compression_type,
self.hdu._header,
self.hdu,
first_tile_index,
last_tile_index,
)
return self.hdu._scale_data(data)
index = simplify_basic_index(index, shape=self._data_shape)
# Determine for each dimension the first and last tile to extract
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = np.zeros(self._n_dim, dtype=int)
final_array_index = []
for dim, idx in enumerate(index):
if isinstance(idx, slice):
if idx.step > 0:
first_tile_index[dim] = idx.start // self._tile_shape[dim]
last_tile_index[dim] = (idx.stop - 1) // self._tile_shape[dim]
else:
stop = 0 if idx.stop is None else max(idx.stop - 1, 0)
first_tile_index[dim] = stop // self._tile_shape[dim]
last_tile_index[dim] = idx.start // self._tile_shape[dim]
# Because slices such as slice(5, 0, 1) can exist (which
# would be empty) we need to make sure last_tile_index is
# always larger than first_tile_index
last_tile_index = np.maximum(last_tile_index, first_tile_index)
if idx.step < 0 and idx.stop is None:
final_array_index.append(idx)
else:
final_array_index.append(
slice(
idx.start - self._tile_shape[dim] * first_tile_index[dim],
idx.stop - self._tile_shape[dim] * first_tile_index[dim],
idx.step,
)
)
else:
first_tile_index[dim] = idx // self._tile_shape[dim]
last_tile_index[dim] = first_tile_index[dim]
final_array_index.append(
idx - self._tile_shape[dim] * first_tile_index[dim]
)
data = decompress_image_data_section(
self.hdu.compressed_data,
self.hdu.compression_type,
self.hdu._header,
self.hdu,
first_tile_index,
last_tile_index,
)
return self.hdu._scale_data(data[tuple(final_array_index)])
|
a1470e1c6b6c21afc53a66982a869c80d0a9a9f2abb42d60398d4dac154150f5 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import os
import re
import shutil
import sys
import warnings
import numpy as np
from astropy.io.fits.file import FILE_MODES, _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import (
_free_space_check,
_get_array_mmap,
_is_int,
_tmp_name,
fileobj_closed,
fileobj_mode,
ignore_sigint,
isfile,
)
from astropy.io.fits.verify import VerifyError, VerifyWarning, _ErrList, _Verify
from astropy.utils import indent
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.exceptions import AstropyUserWarning
from . import compressed
from .base import ExtensionHDU, _BaseHDU, _NonstandardHDU, _ValidHDU
from .groups import GroupsHDU
from .image import ImageHDU, PrimaryHDU
if HAS_BZ2:
import bz2
__all__ = ["HDUList", "fitsopen"]
# FITS file signature as per RFC 4047
FITS_SIGNATURE = b"SIMPLE = T"
def fitsopen(
name,
mode="readonly",
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=None,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
        append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
        If `True`, avoid reading all the HDUs and headers in a FITS file
        immediately upon opening. This is an optimization that is especially
        useful for large files, as FITS has no way of determining the number
        and offsets of all the HDUs in a file without scanning through the
        file and reading all the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
        Whether to return bytes for string columns. Otherwise unicode strings
        are returned; note that this does not respect memory mapping and loads
        the whole column into memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name`` starts with the Amazon S3 storage prefix ``s3://`` or the
Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g., ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file.
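    Examples
    --------
    A minimal usage sketch (the file name is illustrative)::

        from astropy.io import fits

        with fits.open("observation.fits") as hdul:
            hdul.info()
            data = hdul[1].data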
"""
from astropy.io.fits import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if "uint" not in kwargs:
kwargs["uint"] = conf.enable_uint
if not name:
raise ValueError(f"Empty filename: {name!r}")
return HDUList.fromfile(
name,
mode,
memmap,
save_backup,
cache,
lazy_load_hdus,
ignore_missing_simple,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
**kwargs,
)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
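    For example, an `HDUList` can also be assembled in memory and written to a
    new file (the array contents and output name are illustrative)::

        import numpy as np
        from astropy.io import fits

        hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(np.zeros((10, 10)))])
        hdul.writeto("new.fits", overwrite=True)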
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : BaseHDU or sequence thereof, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file-like, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
# If we have read all the HDUs from the file or not
        # This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == "ostream"
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(f"Element {idx} in the HDUList input is not an HDU.")
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
# When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# Special case: if the FITS file is located on a remote file system
# and has not been fully read yet, we return a simplified repr to
# avoid downloading the entire file. We can tell that a file is remote
# from the fact that the ``fsspec`` package was used to open it.
is_fsspec_file = self._file and "fsspec" in str(
self._file._file.__class__.__bases__
)
if not self._read_all and is_fsspec_file:
return f"{type(self)} (partially read)"
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
else:
return HDUList(hdus)
# Originally this used recursion, but hypothetically an HDU with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(
super().__getitem__, self._positive_index_of(key)
)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError(
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def __contains__(self, item):
"""
        Returns `True` if ``item`` is an ``HDU`` *in* ``self`` or a valid
extension specification (e.g., integer extension number, extension
name, or a tuple of extension name and an extension version)
of a ``HDU`` in ``self``.
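        For example, assuming an existing `HDUList` named ``hdulist`` (the
        extension name is illustrative)::

            if "SCI" in hdulist:
                science = hdulist["SCI"].data
            if ("SCI", 2) in hdulist:  # extension name plus EXTVER
                science_2 = hdulist["SCI", 2].data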
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except (KeyError, ValueError):
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError("An element in the HDUList must be an HDU.")
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError(f"{item} is not an HDU.")
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError(f"Extension {key} is out of bound or not found.")
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
if key == end_index or key == -1 and not self._resize:
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
output_verify = self._open_kwargs.get("output_verify", "exception")
self.close(output_verify=output_verify)
@classmethod
def fromfile(
cls,
fileobj,
mode=None,
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
**kwargs,
):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()``; it generally shouldn't
be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(
fileobj=fileobj,
mode=mode,
memmap=memmap,
save_backup=save_backup,
cache=cache,
ignore_missing_simple=ignore_missing_simple,
lazy_load_hdus=lazy_load_hdus,
**kwargs,
)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer-like, etc.
A string or other memory buffer containing an entire FITS file.
Buffer-like objects include :class:`~bytes`, :class:`~bytearray`,
:class:`~memoryview`, and :class:`~numpy.ndarray`.
It should be noted that if that memory is read-only (such as a
Python string) the returned :class:`HDUList`'s data portions will
also be read-only.
**kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
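        Examples
        --------
        A sketch of loading a FITS file from an in-memory buffer (the file
        name is illustrative)::

            from astropy.io import fits

            with open("observation.fits", "rb") as f:
                buf = f.read()
            hdul = fits.HDUList.fromstring(buf)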
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype="ubyte", buffer=data)
except TypeError:
raise TypeError(
f"The provided object {data} does not contain an underlying "
"memory buffer. fromstring() requires an object that "
"supports the buffer interface such as bytes, buffer, "
"memoryview, ndarray, etc. This restriction is to ensure "
"that efficient access to the array/table data is possible."
)
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info["file"]
fm = info["filemode"]
break
output = {
"file": f,
"filemode": fm,
"hdrLoc": None,
"datLoc": None,
"datSpan": None,
}
output["filename"] = self._file.name
output["resized"] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
"""Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
            HDU to be removed and returned. If ``index`` is a tuple, it is
            of the form ``(name, ver)`` where ``ver`` is an ``EXTVER``
            value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : BaseHDU
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
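        Examples
        --------
        A sketch, assuming an existing `HDUList` named ``hdulist`` (extension
        names are illustrative)::

            last = hdulist.pop()            # remove and return the last HDU
            sci = hdulist.pop("SCI")        # remove by extension name
            sci2 = hdulist.pop(("SCI", 2))  # remove by (name, EXTVER)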
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super().pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : BaseHDU
The HDU object to insert
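        Examples
        --------
        A sketch inserting a new image extension after the primary HDU,
        assuming an existing `HDUList` named ``hdulist`` (the array contents
        are illustrative)::

            import numpy as np
            from astropy.io import fits

            hdulist.insert(1, fits.ImageHDU(np.arange(100.0)))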
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError(f"{hdu} is not an HDU.")
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it."
)
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError("A GroupsHDU must be inserted as a Primary HDU.")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : BaseHDU
HDU to add to the `HDUList`.
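        Examples
        --------
        A minimal sketch using empty placeholder HDUs (``fits`` is assumed to
        be ``astropy.io.fits``)::
            hdul = fits.HDUList()
            hdul.append(fits.PrimaryHDU())   # first HDU remains a primary HDU
            hdul.append(fits.ImageHDU())     # later HDUs are extensions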
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError("HDUList can only append an HDU.")
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError("Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str, tuple of (string, int) or BaseHDU
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
When ``key`` is an HDU object, this function returns the
index of that HDU object in the ``HDUList``.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
Raises
------
ValueError
If ``key`` is an HDU object and it is not found in the ``HDUList``.
KeyError
If an HDU specified by the ``key`` that is an extension number,
extension name, or a tuple of extension name and version is not
found in the ``HDUList``.
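        Examples
        --------
        A minimal sketch (``hdul`` is assumed to be an already-opened
        `HDUList`; ``"SCI"`` is a placeholder extension name)::
            hdul.index_of(0)            # -> 0
            hdul.index_of("SCI")        # first HDU whose EXTNAME is 'SCI'
            hdul.index_of(("SCI", 2))   # the 'SCI' HDU with EXTVER == 2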
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
elif isinstance(key, _BaseHDU):
return self.index(key)
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
"{} indices must be integers, extension names as strings, "
"or (extname, version) tuples; got {}"
"".format(self.__class__.__name__, _key)
)
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if (name == _key or (_key == "PRIMARY" and idx == 0)) and (
_ver is None or _ver == hdu.ver
):
found = idx
break
if found is None:
raise KeyError(f"Extension {key!r} not found.")
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
        convert it to the corresponding positive index. This requires
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(f"Extension {index} is out of bound or not found.")
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify="fix", verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print verbose messages
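        Examples
        --------
        A minimal sketch (the file name is a placeholder and ``fits`` is
        assumed to be ``astropy.io.fits``)::
            with fits.open("example.fits", mode="update") as hdul:
                hdul[0].header["HISTORY"] = "modified"
                hdul.flush()   # write the change back to the file
            # Closing a file opened in update mode also flushes it.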
"""
if self._file.mode not in ("append", "update", "ostream"):
warnings.warn(
f"Flush for '{self._file.mode}' mode is not supported.",
AstropyUserWarning,
)
return
save_backup = self._open_kwargs.get("save_backup", False)
if save_backup and self._file.mode in ("append", "update"):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason,
                # then there's no point in trying to make a backup
backup = filename + ".bak"
idx = 1
while os.path.exists(backup):
backup = filename + ".bak." + str(idx)
idx += 1
warnings.warn(
f"Saving a backup of {filename} to {backup}.", AstropyUserWarning
)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError(
                        f"Failed to save backup to destination {backup}"
) from exc
self.verify(option=output_verify)
if self._file.mode in ("append", "ostream"):
for hdu in self:
if verbose:
try:
extver = str(hdu._header["extver"])
except KeyError:
extver = ""
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print("append HDU", hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == "update":
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if "EXTEND" in hdr:
if not hdr["EXTEND"] and get_first_ext() is not None:
hdr["EXTEND"] = True
elif get_first_ext() is not None:
if hdr["NAXIS"] == 0:
hdr.set("EXTEND", True, after="NAXIS")
else:
n = hdr["NAXIS"]
hdr.set("EXTEND", True, after="NAXIS" + str(n))
def writeto(
self, fileobj, output_verify="exception", overwrite=False, checksum=False
):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
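        Examples
        --------
        A minimal sketch (the output file name is a placeholder and ``fits``
        is assumed to be ``astropy.io.fits``)::
            hdul = fits.HDUList([fits.PrimaryHDU()])
            hdul.writeto("output.fits", overwrite=True)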
"""
if len(self) == 0:
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream"
# This can accept an open file object that's open to write only, or in
# append/update modes but only if the file doesn't exist.
fileobj = _File(fileobj, mode=mode, overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except (AttributeError, TypeError):
dirname = None
try:
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
finally:
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify="exception", verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (
self._file
and self._file.mode in ("append", "update")
and not self._file.closed
):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, "close"):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
        Note that by default this function prints its results to the console
        and does not return a value. If ``output`` is set to `False`, the
        summary is instead returned as a list of tuples.
Parameters
----------
output : file-like or bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
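        Examples
        --------
        A minimal sketch (the file name is a placeholder and ``fits`` is
        assumed to be ``astropy.io.fits``)::
            with fits.open("example.fits") as hdul:
                hdul.info()                      # print a summary to stdout
                rows = hdul.info(output=False)   # summary as a list of tuples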
"""
if output is None:
output = sys.stdout
if self._file is None:
name = "(No file associated with this HDUList)"
else:
name = self._file.name
results = [
f"Filename: {name}",
"No. Name Ver Type Cards Dimensions Format",
]
format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}"
default = ("", "", "", 0, (), "", "")
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary) :]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write("\n".join(results))
output.write("\n")
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : str
A string containing the file name associated with the HDUList
object if an association exists. Otherwise returns None.
"""
if self._file is not None:
if hasattr(self._file, "name"):
return self._file.name
return None
@classmethod
def _readfrom(
cls,
fileobj=None,
data=None,
mode=None,
memmap=None,
cache=True,
lazy_load_hdus=True,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
**kwargs,
):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(
fileobj,
mode=mode,
memmap=memmap,
cache=cache,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = "readonly"
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
if (
not ignore_missing_simple
and hdulist._file
and hdulist._file.mode != "ostream"
and hdulist._file.size > 0
):
pos = hdulist._file.tell()
# FITS signature is supposed to be in the first 30 bytes, but to
# allow reading various invalid files we will check in the first
# card (80 bytes).
simple = hdulist._file.read(80)
match_sig = simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in (
b"T",
b"F",
)
if not match_sig:
# Check the SIMPLE card is there but not written correctly
match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple)
if match_sig_relaxed:
warnings.warn(
"Found a SIMPLE card but its format doesn't"
" respect the FITS Standard",
VerifyWarning,
)
else:
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError(
"No SIMPLE card found, this file does not appear to "
"be a valid FITS file. If this is really a FITS file, "
"try with ignore_missing_simple=True"
)
hdulist._file.seek(pos)
# Store additional keyword args that were passed to fits.open
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ("readonly", "denywrite"):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError("Empty or corrupt FITS file")
if not lazy_load_hdus or kwargs.get("checksum") is True:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if (
"disable_image_compression" in kwargs
and kwargs["disable_image_compression"]
):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size :]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if "checksum" in kwargs:
hdu._output_checksum = kwargs["checksum"]
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
"Error validating header for HDU #{} (note: Astropy "
"uses zero-based indexing).\n{}\n"
"There may be extra bytes after the last HDU or the "
"file is corrupted.".format(len(self), indent(str(exc))),
VerifyWarning,
)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option="warn"):
errs = _ErrList([], unit="HDU")
# the first (0th) element must be a primary HDU
if (
len(self) > 0
and (not isinstance(self[0], PrimaryHDU))
and (not isinstance(self[0], _NonstandardHDU))
):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = "Fixed by inserting one as 0th HDU."
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and (
"EXTEND" not in self[0].header or self[0].header["EXTEND"] is not True
):
err_text = (
"Primary HDU does not contain an EXTEND keyword "
"equal to T even though there are extension HDUs."
)
fix_text = "Fixed by inserting or updating the EXTEND keyword."
def fix(header=self[0].header):
naxis = header["NAXIS"]
if naxis == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(naxis)
header.set("EXTEND", value=True, after=after)
errs.append(
self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
)
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = f"HDUList's element {idx} is not an extension HDU."
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
            # Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == "gzip":
new_file = gzip.GzipFile(name, mode="ab+")
elif self._file.compression == "bzip2":
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module."
)
new_file = bz2.BZ2File(name, mode="w")
else:
new_file = name
with self.fromfile(new_file, mode="append") as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith("win"):
                    # Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [
(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self)
if hdu._has_data
]
hdulist._file.close()
self._file.close()
if sys.platform.startswith("win"):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode="rb+")
else:
old_file = old_name
ffo = _File(old_file, mode="update", memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith("win"):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
# https://github.com/numpy/numpy/issues/8628
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file like
# object. We can't write out to a file, we must update the file
# like object in place. To do this, we write out to a temporary
# file, then delete the contents in our file like object, then
# write the contents of the temporary file to the now empty file
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
        Side effect of setting the object's ``_resize`` attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print("One or more header is resized.")
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print("One or more data area is resized.")
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
bdbb957c5f11a126665cd4d4961ed5f1bfa2a5f7a39efc169c2681ef6518f339 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
    expected amount. Return True if consistent, False if any differences.
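    A rough illustration (the effective relative tolerance is currently
    1e-5; the values below are arbitrary)::
        comparefloats(np.array([1.0]), np.array([1.000001]))  # True
        comparefloats(np.array([1.0]), np.array([1.1]))       # False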
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
    Compare two record arrays.
    This is done field by field, using approximate comparison for float
    columns (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
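    A rough illustration with two identical record arrays (values arbitrary)::
        ra = np.rec.array([(1, 2.5), (2, 3.5)], names="a,b")
        rb = np.rec.array([(1, 2.5), (2, 3.5)], names="a,b")
        comparerecords(ra, rb)  # True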
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f"fielda[{row}]: {fielda[row]}")
print(f"fieldb[{row}]: {fieldb[row]}")
print(f"field {i} differs in row {row}")
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
        # Note that the X format requires a two-dimensional array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
exp = [True, True, False, True, False, True, True, True, False, False, True]
temp = f2[1].data.field(7)
assert (temp[0] == exp).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing a column from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
columns_info = "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]"
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 30, "4R x 10C", columns_info, ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}"
assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}"
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
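# len() of a FITS_record, and of slices of it, should be clipped to the
# number of fields in the row.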
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
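# Fill an empty 5-row table row by row, assigning from a FITS_record, a
# tuple, and a list, and check the results through every data view.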
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
# Test assigning data to a table row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
# Test assigning data to a table row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
# Test assigning data to a table row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
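# Copy a whole table into a slice of a larger table and verify the copied
# rows, the pre-existing rows, and the default-initialized last row.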
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
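# The HDU should hold independent copies of the input Columns and their
# arrays, while every data view inside the HDU shares the same ndarray.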
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
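# Build a table from a plain ndarray view of existing table data and
# confirm the internal references stay consistent, both in memory and
# after a write/read round trip.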
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
# Ensure that changing the value of one data element is reflected in
# all of the other views of the same data.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
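# Build a new table from an existing FITS_rec and verify that later
# edits to one table do not leak into the other.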
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
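# Restore the original value before writing so the file on disk matches
# the constructor input.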
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
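# Constructing a BinTableHDU directly from another HDU's data shares the
# underlying arrays, so a change made through either HDU is visible in both.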
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
# Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x)
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
# The zero-width ORBPARM column should still be listed in the data, and
# the data should be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
assert h[1].data.itemsize == 48  # 24 int16 elements (20 + 4) at 2 bytes each
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for the format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
# Column format X is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unnecessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test that the column array is delayed and can be pickled
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle the zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
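# Expected row width: 8 (i8) + 64 (S64) + 24 (3x2 int32) = 96 bytes.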
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
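    # The "pseudo-unsigned" convention stores unsigned integers in a signed
    # FITS column by recording an offset in TZEROn (2**31 for 32-bit data);
    # opening the file with uint=True applies the offset and exposes the
    # column with an unsigned dtype.  A minimal sketch (helper name and
    # ``path`` argument are illustrative only):
    def _demo_pseudo_unsigned(self, path):
        data = np.array([1, 2, 3], dtype=np.uint32)
        col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
        fits.BinTableHDU.from_columns([col]).writeto(path)
        with fits.open(path, uint=True) as hdul:
            assert hdul[1].data["A"].dtype == np.dtype("uint32")
            return hdul[1].header["TZERO1"]  # 2**31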
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), "More {0!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
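    # Variable-length array (VLA) columns use the FITS "P" (32-bit descriptor)
    # or "Q" (64-bit descriptor) format codes; from_columns infers the largest
    # element count and records it in TFORMn, e.g. "PJ(1571)" above.  A small
    # sketch with rows of different lengths (helper name and ``path`` are
    # illustrative only):
    def _demo_vla_column(self, path):
        col = fits.Column(name="VAR", format="PJ()", array=[[1, 2, 3], [4, 5]])
        fits.BinTableHDU.from_columns([col]).writeto(path)
        with fits.open(path) as hdul:
            assert hdul[1].header["TFORM1"] == "PJ(3)"
            return [row.tolist() for row in hdul[1].data["VAR"]]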
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
Check if the error is raised when the heap size is bigger than what can be
indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_Q_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/14808
Check if the error is no longer raised when the heap size is bigger than what can be
indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"QD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
t.writeto(self.temp("matrix.fits"))
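    # The two hugemem tests above probe the heap-size boundary: "P" column
    # descriptors store byte offsets as 32-bit signed integers, so a heap
    # larger than 2**31 - 1 bytes must use the 64-bit "Q" descriptors instead.
    # A rough back-of-the-envelope check of the sizes involved (no file I/O):
    def _demo_heap_size_estimate(self):
        nelem = 2**28
        heap_bytes = (nelem + 1) * 8  # one float64 VLA row, as above
        assert heap_bytes > 2**31 - 1  # too large for 'P' (int32) descriptors
        return heap_bytes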
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
Check that empty VLAs are correctly read.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
# We can't compare the whole array since the _VLF is an array of
# objects, hence we compare elementwise
for i in range(len(hdu[1].data["empty"])):
assert np.array_equal(
hdu[1].data["empty"][i], np.array([], dtype=np.int32)
)
def test_multidim_VLA_tables(self):
"""
Check that multidimensional variable-length arrays are correctly written and read.
See https://github.com/astropy/astropy/issues/12860
and https://github.com/astropy/astropy/issues/7810
"""
a = np.arange(5)
b = np.arange(7)
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(7)"]
assert np.array_equal(
hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
)
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
)
a = np.arange(10).reshape((5, 2))
b = np.arange(14).reshape((7, 2))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(14)"]
assert np.array_equal(
hdus[1].data["test"][0],
np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]),
)
assert np.array_equal(
hdus[1].data["test"][1],
np.array(
[
[0.0, 1.0],
[2.0, 3.0],
[4.0, 5.0],
[6.0, 7.0],
[8.0, 9.0],
[10.0, 11.0],
[12.0, 13.0],
]
),
)
a = np.arange(3).reshape((1, 3))
b = np.arange(6).reshape((2, 3))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(6)"]
assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]]))
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
)
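    # For multidimensional VLAs the column's dim/TDIMn value describes the
    # shape of the largest row in FITS (fastest-axis-first) order, and shorter
    # rows come back with fewer entries along the slow axis.  A minimal sketch
    # mirroring the first case above (helper name and ``path`` illustrative):
    def _demo_multidim_vla(self, path):
        array = np.array([np.arange(5.0), np.arange(7.0)], dtype=object)
        col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
        fits.BinTableHDU.from_columns([col]).writeto(path)
        with fits.open(path) as hdul:
            return hdul[1].data["test"][0].shape  # (1, 5)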
def test_heterogeneous_VLA_tables(self):
"""
Check the behaviour of heterogeneous VLA objects.
"""
# The column format fixes the type of the arrays in the VLF object.
a = np.array([45, 30])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
hdu = fits.BinTableHDU.from_columns([c1])
assert hdu.data[0].array.dtype[0].subdtype[0] == "int32"
# Strings in the VLF object can't be added to the table
a = np.array([45, "thirty"])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
with pytest.raises(
ValueError, match=r"invalid literal for int\(\) with base 10"
):
fits.BinTableHDU.from_columns([c1])
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a loose requirement here,
since Numpy is case-insensitive when parsing format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key in
the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
assert c.format == fitsformat
c = fits.Column("TEST", recformat)
assert c.format == fitsformat
c = fits.Column("TEST", fitsformat)
assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
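    # Each Column keyword argument maps onto a per-column header card
    # (format -> TFORMn, null -> TNULLn, disp -> TDISPn, coord_type -> TCTYPn,
    # coord_unit -> TCUNIn, and so on), which is why the VerifyError above
    # lists the offending card names.  For contrast, a sketch of a keyword set
    # that verifies cleanly (values chosen for illustration only):
    def _demo_valid_column_keywords(self):
        col = fits.Column(
            name="col",
            format="D",
            disp="F8.3",
            coord_type="RA---TAN",
            coord_unit="deg",
        )
        return col.format  # 'D'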
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to the ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
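# Table.read accepts unit_parse_strict to control how an unparseable TUNITn
# value is handled: "warn" (the default here) emits a UnitsWarning, "silent"
# returns an UnrecognizedUnit quietly, and "raise" turns the problem into a
# ValueError.  A compact sketch of the three modes; it assumes ``path`` points
# at a file prepared with an invalid TUNIT1 as in the test above:
def _demo_unit_parse_strict(path):
    with pytest.warns(UnitsWarning):
        Table.read(path)  # default: warn
    t = Table.read(path, unit_parse_strict="silent")  # silent: no warning
    assert isinstance(t["a"].unit, UnrecognizedUnit)
    with pytest.raises(ValueError):
        Table.read(path, unit_parse_strict="raise")  # raise: error out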
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import io
import os
import subprocess
import sys
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.hdu.base import _NonstandardHDU, _ValidHDU
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.data import get_pkg_data_filenames
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdul:
hdul[4].name = "Jim"
hdul[4].ver = 9
assert hdul[("JIM", 9)].header["extname"] == "JIM"
def test_hdu_file_bytes(self):
with fits.open(self.data("checksum.fits")) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res["datSpan"] == kwargs.get("datSpan", 2880)
assert res["resized"] == kwargs.get("resized", False)
assert res["filename"] == self.data("checksum.fits")
assert res["datLoc"] == kwargs.get("datLoc", 8640)
assert res["hdrLoc"] == kwargs.get("hdrLoc", 0)
assert res["filemode"] == "readonly"
with fits.open(self.data("checksum.fits")) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(
VerifyError, hdul.writeto, self.temp("temp.fits"), output_verify="exception"
)
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data("tb.fits")) as hdul1:
hdul.append(hdul1[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data("arange.fits")) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data("tb.fits")) as hdul:
hdul.append(hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-append.fits"))
assert fits.info(self.temp("test-append.fits"), output=False) == info
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data("tb.fits")) as hdul1:
hdul.insert(0, hdul1[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data("arange.fits")) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data("tb.fits")) as hdul:
hdul.insert(1, hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [
(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters"),
(1, "", 1, "ImageHDU", 6, (100,), "int32", ""),
]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
with pytest.raises(ValueError):
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU in front of the primary HDU of a non-empty HDUList.
with fits.open(self.data("tb.fits")) as hdul:
hdul.insert(0, hdul[1])
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
(2, "", 1, "ImageHDU", 12, (), "", ""),
(3, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data("tb.fits")) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", ""),
(1, "", 1, "ImageHDU", 12, (), "", ""),
(2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""),
]
assert hdul.info(output=False) == info
hdul.writeto(self.temp("test-insert.fits"))
assert fits.info(self.temp("test-insert.fits"), output=False) == info
def test_filename(self, home_is_data):
"""Tests the HDUList filename method."""
with fits.open(self.data("tb.fits")) as hdul:
name = hdul.filename()
assert name == os.path.expanduser(self.data("tb.fits"))
def test_file_like(self):
"""
Tests the use of a file like object with no tell or seek methods
in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp("tmpfile.fits"), "wb")
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp("tmpfile.fits"), "wb")
hdul = fits.open(tmpfile, mode="ostream")
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp("tmpfile.fits"), "wb")
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")]
assert fits.info(self.temp("tmpfile.fits"), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data("test0.fits")) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header["EXTNAME"] == "SCI"
assert hdul[1].header["EXTVER"] == 1
assert hdul.index_of(("SCI", 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
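    # HDUs can be addressed by name, by (name, version) tuple, or by position;
    # index_of resolves any of those to an integer index, as used above.  A
    # minimal sketch (helper name illustrative only):
    def _demo_extname_indexing(self):
        hdul = fits.HDUList([fits.PrimaryHDU()])
        hdul.append(fits.ImageHDU(name="SCI", ver=2))
        assert hdul.index_of(("SCI", 2)) == 1
        return hdul["SCI"].ver  # 2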
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode="update")
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data("test0.fits")).st_mtime
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header["FOO"] = "BAR"
with pytest.warns(AstropyUserWarning, match="mode is not supported") as w:
hdul.flush()
assert len(w) == 1
assert oldmtime == os.stat(self.data("test0.fits")).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header["EXTEND"]
hdul.verify("silentfix")
assert "EXTEND" in hdul[0].header
assert hdul[0].header["EXTEND"] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data("arange.fits"))
# Malform NAXISj header data
hdu[0].header["NAXIS1"] = 11.0
hdu[0].header["NAXIS2"] = "10.0"
hdu[0].header["NAXIS3"] = "7"
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, "10.0", "7"]
# Perform verification including the fix
hdu.verify("silentfix")
# Check that malformed data was converted
assert hdu[0].header["NAXIS1"] == 11
assert hdu[0].header["NAXIS2"] == 10
assert hdu[0].header["NAXIS3"] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data("arange.fits"))
# Fake new NAXISj header data
hdu[0].header["NAXIS1"] = 768
hdu[0].header["NAXIS2"] = 64
hdu[0].header["NAXIS3"] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify("silentfix")
# Check that the well-formed values were left unchanged
assert hdu[0].header["NAXIS1"] == 768
assert hdu[0].header["NAXIS2"] == 64
assert hdu[0].header["NAXIS3"] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array([10]))
hdul = fits.HDUList([hdu, sci])
assert "EXTEND" in hdu.header
assert hdu.header["EXTEND"] is True
hdul.writeto(self.temp("temp.fits"))
hdr = fits.getheader(self.temp("temp.fits"))
assert "EXTEND" in hdr
assert hdr["EXTEND"] is True
def test_replace_memmaped_array(self, home_is_temp):
# Copy the original before we modify it
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(self.temp("temp.fits"))
hdul = fits.open(self.temp("temp.fits"), mode="update", memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp("temp.fits"), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_bad_file_padding(self):
"""
Test warning when opening files with extra padding at the end.
See https://github.com/astropy/astropy/issues/4351
"""
# write some arbitrary data to a FITS file
fits.writeto(self.temp("temp.fits"), np.arange(100))
# append some arbitrary number of zeros to the end
with open(self.temp("temp.fits"), "ab") as fobj:
fobj.write(b"\x00" * 1234)
with pytest.warns(
AstropyUserWarning, match="Unexpected extra padding at the end of the file."
) as w:
with fits.open(self.temp("temp.fits")) as fobj:
fobj.info()
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Unexpected extra padding")
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data("test0.fits"), do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp("temp.fits"))
with open(self.temp("temp.fits"), "ab") as f:
f.seek(0, os.SEEK_END)
f.write(b"\0" * 2880)
assert info == fits.info(
self.temp("temp.fits"), output=False, do_not_scale_image_data=True
)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp("temp.fits"))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index("END" + " " * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp("temp.fits"), "r+b") as f:
f.seek(padding_start)
f.write(b"\0" * padding_len)
with pytest.warns(
AstropyUserWarning, match="contains null bytes instead of spaces"
) as w:
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == a).all()
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f"TEST{idx}"] = idx
idx += 1
hdu.writeto(self.temp("temp.fits"), checksum=True)
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header["TEST1"] = 2
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == data).all()
def test_update_resized_header(self, home_is_temp):
"""
Test saving updates to a file where the header is one block smaller
than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header[f"TEST{idx}"] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp("temp.fits"))
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp("temp.fits")) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f"TEST{idx}"] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp("temp.fits")) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self, home_is_temp):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp("temp.fits"))
with fits.open(self.temp("temp.fits"), mode="append") as hdul:
hdul.append(hdu)
with fits.open(self.temp("temp.fits"), mode="update") as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f"TEST{idx}"] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp("temp.fits")) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built-in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, "rb") as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields:
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif any(dim == 0 for dim in hdul[idx].data.shape) or any(
dim == 0 for dim in hdul2[idx].data.shape
):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
assert hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data)
for filename in get_pkg_data_filenames("data", pattern="*.fits"):
if sys.platform == "win32" and filename.endswith("zerowidth.fits"):
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith(("variable_length_table.fits", "theap-gap.fits")):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ["a", "b", "c"])
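    # HDUList.fromstring parses an entire in-memory FITS file (as bytes)
    # without touching the filesystem, which is what the loop above
    # round-trips for every bundled data file.  A minimal sketch:
    def _demo_hdulist_fromstring(self):
        buf = io.BytesIO()
        fits.HDUList([fits.PrimaryHDU(np.arange(10))]).writeto(buf)
        hdul = fits.HDUList.fromstring(buf.getvalue())
        return hdul[0].data.shape  # (10,)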
@pytest.mark.filterwarnings("ignore:Saving a backup")
def test_save_backup(self, home_is_temp):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file("scale.fits")
with fits.open(
self.temp("scale.fits"), mode="update", save_backup=True
) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header["TEST"] = "TEST"
# This emits warning that needs to be ignored at the
# pytest.mark.filterwarnings level.
hdul[0].data[0] = 0
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak")))
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul1:
with fits.open(
self.temp("scale.fits.bak"), do_not_scale_image_data=True
) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with fits.open(
self.temp("scale.fits"), mode="update", save_backup=True
) as hdul:
# One more time to see if multiple backups are made
hdul[0].header["TEST2"] = "TEST"
hdul[0].data[0] = 1
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak")))
assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak.1")))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp("test_a.fits"), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp("test_b.fits"), overwrite=True)
with fits.open(
self.temp("test_a.fits"), mode="update", memmap=mmap_a
) as hdul_a:
with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp("test_a.fits")) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
        # Finally, without mmapping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name="a", format="J", array=arr_a)
col_b = fits.Column(name="b", format="J", array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp("test_a.fits"), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp("test_b.fits"), overwrite=True)
with fits.open(
self.temp("test_a.fits"), mode="update", memmap=mmap_a
) as hdul_a:
with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp("test_a.fits")) as hdul_a:
assert "b" in hdul_a[1].columns.names
assert "a" not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data["b"] == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
        # Finally, without mmapping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist:
hdulist.append(fits.ImageHDU(name="a"))
assert "a" in hdulist
assert "A" in hdulist
assert ("a", 1) in hdulist
assert ("A", 1) in hdulist
assert "b" not in hdulist
assert ("a", 2) not in hdulist
assert ("b", 1) not in hdulist
assert ("b", 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
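    # The membership checks above rely on HDUList keys being either an EXTNAME
    # string (matched case-insensitively) or an (EXTNAME, EXTVER) tuple, so the
    # same keys also work for indexing, e.g. hdulist["SCI"] or
    # hdulist[("SCI", 2)].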
def test_overwrite(self, home_is_temp):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp("test_overwrite.fits"))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=False)
hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=True)
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert key not in hdulist
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name="SCI"))
hdulist.append(fits.ImageHDU(name="SCI"))
hdulist.append(fits.ImageHDU(name="nada"))
hdulist.append(fits.ImageHDU(name="SCI"))
filename = self.temp("many_extension.fits")
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = list(f)
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = list(f[1:])
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header["EXTNAME"] == "SCI"]
assert len(read_exts) == 2
f.close()
def test_read_non_standard_hdu(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
buf.seek(0)
hdustr = buf.read()
hdustr = hdustr.replace(
b"SIMPLE = T", b"SIMPLE = F"
)
with open(filename, mode="wb") as f:
f.write(hdustr)
with fits.open(filename) as hdul:
assert isinstance(hdul[0], _NonstandardHDU)
assert hdul[0].header["FOO"] == "BAR"
def test_proper_error_raised_on_non_fits_file(self):
filename = self.temp("not-fits.fits")
with open(filename, mode="w", encoding="utf=8") as f:
f.write("Not a FITS file")
match = (
"No SIMPLE card found, this file does not appear to be a valid FITS file"
)
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode="append")
with pytest.raises(OSError, match=match):
fits.open(filename, mode="update")
def test_proper_error_raised_on_invalid_fits_file(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
# write 80 additional bytes so the block will have the correct size
buf.write(b" " * 80)
buf.seek(0)
buf.seek(80) # now remove the SIMPLE card
with open(filename, mode="wb") as f:
f.write(buf.read())
match = (
"No SIMPLE card found, this file does not appear to be a valid FITS file"
)
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode="append")
with pytest.raises(OSError, match=match):
fits.open(filename, mode="update")
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header["FOO"] == "BAR"
def test_warning_raised_on_non_standard_simple_card(self):
filename = self.temp("bad-fits.fits")
hdu = fits.PrimaryHDU()
hdu.header["FOO"] = "BAR"
buf = io.BytesIO()
hdu.writeto(buf)
# change the simple card format
buf.seek(0)
buf.write(b"SIMPLE = T ")
buf.seek(0)
with open(filename, mode="wb") as f:
f.write(buf.read())
match = "Found a SIMPLE card but its format doesn't respect the FITS Standard"
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode="append")
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode="update")
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header["FOO"] == "BAR"
# change the simple card format
buf.seek(0)
buf.write(b"SIMPLE = T / This is a FITS file")
buf.seek(0)
with open(filename, mode="wb") as f:
f.write(buf.read())
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
filename = self.temp("not-fits-with-unicode.fits")
with open(filename, mode="w", encoding="utf=8") as f:
f.write("Ce\xe7i ne marche pas")
# This should raise an OSError because there is no end card.
with pytest.raises(
OSError,
match=(
"No SIMPLE card found, this file "
"does not appear to be a valid FITS file"
),
):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
# all included in one test file. See the discussion to the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp("not-fits.fits")
with open(filename, mode="w") as f:
f.write("# header line\n")
f.write("0.1 0.2\n")
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised.
# Make sure that files opened by the user are not closed
with open(filename, mode="rb") as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode="rb") as f:
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(filename, ignore_missing_end=True)
def test_pop_with_lazy_load(self):
filename = self.data("checksum.fits")
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(("SCI", 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop("SCI")
assert len(hdul) == 5
assert hdu_popped is hdu1
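    # Note that HDUList.pop accepts the same kinds of keys as indexing does --
    # an integer position, an EXTNAME string, or an (EXTNAME, EXTVER) tuple --
    # and pops the last HDU when called with no argument.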
# Skip due to https://github.com/astropy/astropy/issues/8916
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="Cannot test on Windows"
)
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
to ensure that an HDUList can be written to a stream.
"""
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp("test.fits"), "wb") as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=fout) as p:
hdulist.writeto(p.stdin)
def test_output_verify(self):
hdul = fits.HDUList([fits.PrimaryHDU()])
hdul[0].header["FOOBAR"] = 42
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
data = f.read()
# create invalid card
data = data.replace(b"FOOBAR =", b"FOOBAR = ")
with open(self.temp("test2.fits"), "wb") as f:
f.write(data)
with pytest.raises(VerifyError):
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
hdul[0].header["MORE"] = "here"
with pytest.warns(VerifyWarning) as ww:
with fits.open(
self.temp("test2.fits"), mode="update", output_verify="fix+warn"
) as hdul:
hdul[0].header["MORE"] = "here"
assert len(ww) == 6
msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)"
assert msg in str(ww[3].message)
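        # For reference, output_verify accepts the usual astropy verification
        # modes ("exception", "ignore", "fix", "silentfix", "warn") as well as
        # combined forms such as the "fix+warn" used above.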
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import warnings
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([("a", 1), ("b", 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header["c"] = 100
assert "c" not in copied_header
# and changing the copy should not change the original.
copied_header["a"] = 0
assert original_header["a"] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([("a", 10)])
new_header = fits.Header(original_header, copy=True)
original_header["a"] = 20
assert new_header["a"] == 10
new_header["a"] = 0
assert original_header["a"] == 20
def test_init_with_dict():
dict1 = {"a": 11, "b": 12, "c": 13, "d": 14, "e": 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
    # Create a list of tuples, each consisting of a letter and its index
list1 = [(i, j) for j, i in enumerate("abcdefghijklmnopqrstuvwxyz")]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
    # Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
header.rename_keyword("A", "B")
assert "A" not in header
assert "B" in header
assert header[0] == "B"
assert header["B"] == "B"
assert header.comments["B"] == "C"
@pytest.mark.parametrize("key", ["A", "a"])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
assert key in header
assert header[key] == "B"
assert header.get(key) == "B"
assert header.index(key) == 0
assert header.comments[key] == "C"
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert c.keyword == ""
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == "ABC"
assert c.value == "abc"
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card("abc", "<8 ch")
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card("nullstr", "")
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring("ABC = F")
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card("long_int", -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card("floatnum", -467374636747637647347374734737437.0)
if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad(
"FLOATNUM= -4.6737463674763E+032"
):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_floating_point_string_representation_card(self):
"""
Ensures Card formats float values with the correct precision, avoiding
comment truncation
Regression test for https://github.com/astropy/astropy/issues/14507
"""
k = "HIERARCH ABC DEF GH IJKLMN"
com = "[m] abcdef ghijklm nopqrstu vw xyzab"
c = fits.Card(k, 0.009125, com)
expected_str = f"{k} = 0.009125 / {com}"
assert str(c)[: len(expected_str)] == expected_str
c = fits.Card(k, 8.95, com)
expected_str = f"{k} = 8.95 / {com}"
assert str(c)[: len(expected_str)] == expected_str
c = fits.Card(k, -99.9, com)
expected_str = f"{k} = -99.9 / {com}"
assert str(c)[: len(expected_str)] == expected_str
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card("abc", 9, "abcde" * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (
str(c) == "ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
)
c = fits.Card("abc", "a" * 68, "abcdefg")
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ("abc",), {"value": (2, 3)})
pytest.raises(ValueError, fits.Card, "key", [], "comment")
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, "abcdefghi", "long")
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card("abc+", 9)
assert len(w) == 1
assert c.image == _pad("HIERARCH abc+ = 9")
def test_add_history(self):
header = fits.Header(
[
("A", "B", "C"),
("HISTORY", 1),
("HISTORY", 2),
("HISTORY", 3),
("", "", ""),
("", "", ""),
]
)
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header["HISTORY"] == [1, 2, 3, 4]
assert repr(header["HISTORY"]) == "1\n2\n3\n4"
header.add_history(0, after="A")
assert len(header) == 6
assert header.cards[1].value == 0
assert header["HISTORY"] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header(
[("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")]
)
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[""] == [1, 2, 3, "", "", 4]
assert repr(header[""]) == "1\n2\n3\n\n\n4"
header.add_blank(0, after="A")
assert len(header) == 8
assert header.cards[1].value == 0
assert header[""] == [0, 1, 2, 3, "", "", 4]
header[""] = 5
header[" "] = 6
assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6]
assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
header = fits.Header()
header.update({"FOO": ("BAR", "BAZ")})
header.update(FakeHeader([("A", 1), ("B", 2, "comment")]))
assert set(header.keys()) == {"FOO", "A", "B"}
assert header.comments["B"] == "comment"
# test that comments are preserved
tmphdr = fits.Header()
tmphdr["HELLO"] = (1, "this is a comment")
header.update(tmphdr)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO"}
assert header.comments["HELLO"] == "this is a comment"
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"}
assert set(header.values()) == {"BAR", 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data("arange.fits"))
hdul[0].header.update({"FOO": ("BAR", "BAZ")})
assert hdul[0].header["FOO"] == "BAR"
assert hdul[0].header.comments["FOO"] == "BAZ"
with pytest.raises(ValueError):
hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")})
hdul.writeto(self.temp("test.fits"))
hdul.close()
hdul = fits.open(self.temp("test.fits"), mode="update")
hdul[0].header.comments["FOO"] = "QUX"
hdul.close()
hdul = fits.open(self.temp("test.fits"))
assert hdul[0].header.comments["FOO"] == "QUX"
hdul[0].header.add_comment(0, after="FOO")
assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0"
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad("HISTORY " + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad("COMMENT " + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value."
)
assert (
c.value == "card has no comments. "
"/ text after slash is still part of the value."
)
assert c.comment == ""
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card("", " / EXPOSURE INFORMATION")
assert str(c) == _pad(" / EXPOSURE INFORMATION")
c = fits.Card.fromstring(str(c))
assert c.keyword == ""
assert c.value == " / EXPOSURE INFORMATION"
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring("ABC = (8, 9)")
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring("abc = + 2.1 e + 12")
assert c.value == 2100000000000.0
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
        # fixable non-FSC: if the card is not parsable, its value will be
        # assumed to be a string and everything after the first slash will be
        # the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes / let's also try the comment"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment "
)
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring("ABC = ")
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header["FOO"] = "BAR"
header["UNDEF"] = None
assert list(header.values()) == ["BAR", None]
assert list(header.items()) == [("FOO", "BAR"), ("UNDEF", None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring("XYZ= 100")
assert c.keyword == "XYZ"
assert c.value == 100
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)"
err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'"
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify("fix")
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
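    # The expected image above shows the FITS long-string (CONTINUE)
    # convention: a quoted value ending in '&' is continued on a following
    # CONTINUE card, and the last segment drops the ampersand; the comment is
    # carried along on the same CONTINUE cards after the '/' separator.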
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card(
"WHATEVER",
"SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_"
"03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY"
"_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml",
)
assert (
str(c)
== "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' "
)
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1["TEST"] = "abcdefg" * 30
h2 = fits.Header()
h2["TEST"] = "abcdefg" * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header["TEST1"] = ("Regular value", "Regular comment")
header["TEST2"] = ("long string value " * 10, "long comment " * 10)
header["TEST3"] = ("Regular value", "Regular comment")
assert repr(header).splitlines() == [
str(fits.Card("TEST1", "Regular value", "Regular comment")),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card("TEST3", "Regular value", "Regular comment")),
]
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = "long string value " * 10
header = fits.Header()
header[""] = value
assert len(header) == 3
assert " ".join(header[""]) == value.rstrip()
# Ensure that this works like other commentary keywords
header["COMMENT"] = value
header["HISTORY"] = value
assert header["COMMENT"] == header["HISTORY"]
assert header["COMMENT"] == header[""]
def test_long_string_from_file(self):
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
c = hdul[0].header.cards["abc"]
hdul.close()
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10)
assert (
str(c)
== "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment "
)
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' / comments in line 1")
+ _pad(
"continue 'continue with long string but without the "
"ampersand at the end' /"
)
+ _pad(
"continue 'continue must have string value (with quotes)' "
"/ comments with ''. "
)
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c)
== "ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. "
)
def test_long_string_value_with_quotes(self):
testval = "x" * 100 + "''"
c = fits.Card("TEST", testval)
c = fits.Card.fromstring(c.image)
assert c.value == testval
testval = "x" * 100 + "''xxx"
c = fits.Card("TEST", testval)
c = fits.Card.fromstring(c.image)
assert c.value == testval
testval = "x" * 100 + "'' xxx"
c = fits.Card("TEST", testval)
c = fits.Card.fromstring(c.image)
assert c.value == testval
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad(
"EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'"
)
+ _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'")
+ _pad("CONTINUE '&' / pysyn expression")
)
assert c.keyword == "EXPR"
assert (
c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits "
"* 5.87359e-12 * MWAvg(Av=0.12)"
)
assert c.comment == "pysyn expression"
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h["SVALUE"] = "A" * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card("TEST", "long value" * 10, "long comment &" * 10)
assert (
str(c)
== "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & "
)
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(
AstropyUserWarning, match="HIERARCH card will be created"
) as w:
c = fits.Card(
"ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert len(w) == 1
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
# Test manual creation of hierarch card
c = fits.Card("hierarch abcdefghi", 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card(
"HIERARCH ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
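    # Both paths above use the ESO HIERARCH convention: keywords longer than
    # 8 characters, or containing characters that are invalid in a standard
    # keyword, are written as "HIERARCH <keyword> = <value>", e.g. (an
    # illustrative keyword, not one used by this test):
    #
    #     fits.Card("HIERARCH ESO TEL AIRM START", 1.2)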
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings."""
filename = fits.util.get_testdata_filepath("compressed_image.fits")
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
# Test also with creation via the Card constructor
c = fits.Card("HIERARCH key.META_4", "calFileVersion")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment")
# This should not raise any exceptions
c.verify("exception")
assert c.keyword == "WeirdCard.~!@#_^$%&"
assert c.value == "The value"
assert c.comment == "a comment"
# Test also the specific case from the original bug report
header = fits.Header(
[
("simple", True),
("BITPIX", 8),
("NAXIS", 0),
("EXTEND", True, "May contain datasets"),
("HIERARCH key.META_0", "detRow"),
]
)
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
header2 = hdul[0].header
assert str(header.cards[header.index("key.META_0")]) == str(
header2.cards[header2.index("key.META_0")]
)
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], "NAXIS")
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header["NAXIS"]
def test_hierarch_card_lookup(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
assert "abcdefghi" in header
assert header["abcdefghi"] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert "ABCDEFGHI" in header
def test_hierarch_card_delete(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
del header["hierarch abcdefghi"]
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["abcdefghi"] = 10
header["abcdefgh"] = 10
header["abcdefg"] = 10
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header["abcdefghij"]
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header[2]
assert list(header.keys())[2] == "abcdefg".upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLAH BLAH": "TESTA"})
assert len(w) == 0
assert "BLAH BLAH" in header
assert header["BLAH BLAH"] == "TESTA"
header.update({"HIERARCH BLAH BLAH": "TESTB"})
assert len(w) == 0
assert header["BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH": "TESTC"})
assert len(w) == 1
assert len(header) == 1
assert header["BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["blah blah"], "TESTD"
header.update({"blah blah": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["blah blah"], "TESTE"
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({"BLAH BLAH BLAH": "TESTA"})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"})
assert len(w) == 3
assert header["BLAH BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH BLAH": "TESTC"})
assert len(w) == 4
assert header["BLAH BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah blah": "TESTD"})
assert len(w) == 4
assert header["blah blah blah"], "TESTD"
header.update({"blah blah blah": "TESTE"})
assert len(w) == 5
assert header["blah blah blah"], "TESTE"
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLA BLA": "TESTA"})
assert len(w) == 0
assert "BLA BLA" in header
assert header["BLA BLA"] == "TESTA"
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 0
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 1
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTE"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({"BLA BLA": "TESTA"})
print([x.category for x in w])
assert len(w) == 1
assert msg in str(w[0].message)
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 1
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 2
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 3
assert len(header) == 1
assert header["bla bla"], "TESTE"
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header["FOO"] = ("bar", "baz", "qux")
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header["FOO"] = ("BAR",)
header["FOO2"] = (None,)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == ""
assert header.comments["FOO"] == ""
def test_header_setitem_2tuple(self):
header = fits.Header()
header["FOO"] = ("BAR", "BAZ")
header["FOO2"] = (None, None)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == "BAZ"
assert header.comments["FOO"] == "BAZ"
assert header.comments["FOO2"] == ""
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header["FOO"] = "BAR"
assert header["FOO"] == "BAR"
header["FOO"] = None
assert header["FOO"] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep="\n")
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header["UNDEF3"] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header["DEFINED"] == 42
assert header["UNDEF"] is None
assert header["UNDEF2"] is None
assert header["UNDEF3"] is None
assert header["UNDEF5"] is None
assert header["UNDEF6"] is None
# Assign an undefined value to a new card
header["UNDEF4"] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
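    # Summary of the behaviour exercised above: assigning None (or
    # fits.card.UNDEFINED) produces a card with no value field at all -- the
    # image is just "KEYWORD =" -- and reading such a card back yields None.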
def test_set_comment_only(self):
header = fits.Header([("A", "B", "C")])
header.set("A", comment="D")
assert header["A"] == "B"
assert header.comments["A"] == "D"
def test_header_iter(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header) == ["A", "C"]
def test_header_slice(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
newheader = header[1:]
assert len(newheader) == 2
assert "A" not in newheader
assert "C" in newheader
assert "E" in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == "F"
assert newheader[1] == "D"
assert newheader[2] == "B"
newheader = header[::2]
assert len(newheader) == 2
assert "A" in newheader
assert "C" not in newheader
assert "E" in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = "GH"
assert header[1] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header[1:] = ["H", "I"]
assert header[1] == "H"
assert header[2] == "I"
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
del header[1:]
assert len(header) == 1
assert header[0] == "B"
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
newheader = header["AB*"]
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)])
assert len(header["DATE*"]) == 3
assert len(header["DATE?*"]) == 2
assert len(header["DATE-*"]) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header["AB*"] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header["AB*"] = "GH"
assert header[0] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header["AB*"] = ["H", "I"]
assert header[0] == "H"
assert header[2] == "I"
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
del header["AB*"]
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header(
[
("ABC", 0),
("HISTORY", 1),
("HISTORY", 2),
("DEF", 3),
("HISTORY", 4),
("HISTORY", 5),
]
)
assert header["HISTORY"] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([("A", "B"), ("C", "D")])
header.clear()
assert "A" not in header
assert "C" not in header
assert len(header) == 0
@pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header["FOO"] = "BAR"
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp("temp.fits"), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(["A", "B"])
assert "A" in header
assert header["A"] is None
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] is None
assert header.comments["B"] == ""
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(["A", "B"], "C")
assert "A" in header
assert header["A"] == "C"
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] == "C"
assert header.comments["B"] == ""
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(["A"], ("B", "C"))
assert "A" in header
assert header["A"] == "B"
assert header.comments["A"] == "C"
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(["A", "B", "A"], "C")
assert "A" in header
assert ("A", 0) in header
assert ("A", 1) in header
assert ("A", 2) not in header
assert header[0] == "C"
assert header["A"] == "C"
assert header[("A", 0)] == "C"
assert header[2] == "C"
assert header[("A", 1)] == "C"
def test_header_items(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header.items()) == [("A", "B"), ("C", "D")]
def test_header_iterkeys(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.values(), ["B", "D"]):
assert a == b
def test_header_keys(self):
with fits.open(self.data("arange.fits")) as hdul:
assert list(hdul[0].header) == [
"SIMPLE",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"NAXIS3",
"EXTEND",
]
def test_header_list_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
last = header.pop()
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop(1)
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop(0)
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
pytest.raises(TypeError, header.pop, "A", "B", "C")
last = header.pop("G")
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop("C")
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop("A")
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
default = header.pop("X", "Y")
assert default == "Y"
assert len(header) == 1
pytest.raises(KeyError, header.pop, "X")
def test_popitem(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.setdefault("A") == "B"
assert header.setdefault("C") == "D"
assert header.setdefault("E") == "F"
assert len(header) == 3
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
assert "G" in header
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update({"A": "E", "F": "G"})
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([("A", "B"), ("C", "D")])
header.update(A="E", F="G")
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update([("A", "E"), fits.Card("F", "G")])
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header["MYKEY"] = ("some val", "some comment")
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == "XTENSION"
assert hdu.header[-1] == "some val"
assert ("MYKEY", 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == "some val"
assert hdu.header[-1] == "some other val"
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu2.header["HISTORY"] = "history 1"
hdu2.header["HISTORY"] = "history 2"
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) in hdu.header
assert hdu.header[("MYKEY", 1)] == "some other val"
assert len(hdu.header["HISTORY"]) == 3
assert hdu.header[-1] == "history 2"
hdu = fits.PrimaryHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) not in hdu.header
assert hdu.header["MYKEY"] == "some other val"
assert len(hdu.header["HISTORY"]) == 2
assert hdu.header[-1] == "history 2"
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data("test0.fits"))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.count("A") == 1
assert header.count("C") == 1
assert header.count("E") == 1
header["HISTORY"] = "a"
header["HISTORY"] = "b"
assert header.count("HISTORY") == 2
pytest.raises(KeyError, header.count, "G")
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ""
assert header[-2] == ""
# New card should fill the first blank by default
header.append(("E", "F"))
assert len(header) == 4
assert header[-2] == "F"
assert header[-1] == ""
# This card should not use up a blank spot
header.append(("G", "H"), useblanks=False)
assert len(header) == 5
assert header[-1] == ""
assert header[-2] == "H"
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.append("E")
assert len(header) == 3
assert list(header)[-1] == "E"
assert header[-1] is None
assert header.comments["E"] == ""
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append("")
assert len(header) == 4
assert list(header)[-1] == ""
assert header[""] == ""
assert header.comments[""] == ""
def test_header_insert_use_blanks(self):
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ("E", "F"))
assert len(header) == 4
assert header[1] == "F"
assert header[-1] == ""
assert header[-2] == "D"
# Insert a new card without using blanks
header.insert(1, ("G", "H"), useblanks=False)
assert len(header) == 5
assert header[1] == "H"
assert header[-1] == ""
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header(
[("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")]
)
header.insert("NAXIS1", ("NAXIS", 2, "Number of axes"))
assert list(header.keys())[0] == "NAXIS"
assert header[0] == 2
assert header.comments[0] == "Number of axes"
header.insert("NAXIS1", ("NAXIS2", 20), after=True)
assert list(header.keys())[1] == "NAXIS1"
assert list(header.keys())[2] == "NAXIS2"
assert header[2] == 20
header.insert(("COMMENT", 1), ("COMMENT", "Comment 2"))
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"]
header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True)
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"]
header.insert(-1, ("TEST1", True))
assert list(header.keys())[-2] == "TEST1"
header.insert(-1, ("TEST2", True), after=True)
assert list(header.keys())[-1] == "TEST2"
assert list(header.keys())[-3] == "TEST1"
def test_remove(self):
header = fits.Header([("A", "B"), ("C", "D")])
# When keyword is present in the header it should be removed.
header.remove("C")
assert len(header) == 1
assert list(header) == ["A"]
assert "C" not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove("F")
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove("F", ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")])
header.remove("A", remove_all=True)
assert "A" not in header
assert len(header) == 1
assert list(header) == ["C"]
assert header[0] == "D"
def test_header_comments(self):
header = fits.Header([("A", "B", "C"), ("DEF", "G", "H")])
assert repr(header.comments) == " A C\n DEF H"
def test_comment_slices_and_filters(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
s = header.comments[1:]
assert list(s) == ["H", "K"]
s = header.comments[::-1]
assert list(s) == ["K", "H", "D"]
s = header.comments["A*"]
assert list(s) == ["D", "K"]
def test_comment_slice_filter_assign(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
header.comments[1:] = "L"
assert list(header.comments) == ["D", "L", "L"]
assert header.cards[header.index("AB")].comment == "D"
assert header.cards[header.index("EF")].comment == "L"
assert header.cards[header.index("AI")].comment == "L"
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ["L", "L", "D"]
header.comments["A*"] = ["M", "N"]
assert list(header.comments) == ["M", "L", "N"]
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header["HISTORY"] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header["HISTORY"][1:] == indices[1:]
assert header["HISTORY"][:3] == indices[:3]
assert header["HISTORY"][:6] == indices[:6]
assert header["HISTORY"][:-2] == indices[:-2]
assert header["HISTORY"][::-1] == indices[::-1]
assert header["HISTORY"][1::-1] == indices[1::-1]
assert header["HISTORY"][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ("A", "B", "C"))
header.append(("D", "E", "F"), end=True)
assert list(header["HISTORY"][1:]) == indices[1:]
assert list(header["HISTORY"][:3]) == indices[:3]
assert list(header["HISTORY"][:6]) == indices[:6]
assert list(header["HISTORY"][:-2]) == indices[:-2]
assert list(header["HISTORY"][::-1]) == indices[::-1]
assert list(header["HISTORY"][1::-1]) == indices[1::-1]
assert list(header["HISTORY"][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header["FOO"] = "BAR"
header["HISTORY"] = "ABC"
header["FRED"] = "BARNEY"
header["HISTORY"] = "DEF"
header["HISTORY"] = "GHI"
assert header["HISTORY"] == ["ABC", "DEF", "GHI"]
# Single value update
header["HISTORY"][0] = "FOO"
assert header["HISTORY"] == ["FOO", "DEF", "GHI"]
# Single value partial slice update
header["HISTORY"][1:] = "BAR"
assert header["HISTORY"] == ["FOO", "BAR", "BAR"]
# Multi-value update
header["HISTORY"][:] = ["BAZ", "QUX"]
assert header["HISTORY"] == ["BAZ", "QUX", "BAR"]
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header["HISTORY"] = "hello world"
header["HISTORY"] = "hello world"
header["COMMENT"] = "hello world"
assert header["HISTORY"] != header["COMMENT"]
header["COMMENT"] = "hello world"
assert header["HISTORY"] == header["COMMENT"]
def test_long_commentary_card(self):
header = fits.Header()
header["FOO"] = "BAR"
header["BAZ"] = "QUX"
longval = "ABC" * 30
header["HISTORY"] = longval
header["FRED"] = "BARNEY"
header["HISTORY"] = longval
assert len(header) == 7
assert list(header)[2] == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.set("HISTORY", longval, after="FOO")
assert len(header) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
header = fits.Header()
header.update({"FOO": "BAR"})
header.update({"BAZ": "QUX"})
longval = "ABC" * 30
header.add_history(longval)
header.update({"FRED": "BARNEY"})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.add_history(longval, after="FOO")
assert len(header.cards) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
def test_totxtfile(self, home_is_temp):
header_filename = self.temp("header.txt")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.totextfile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.totextfile(header_filename, overwrite=False)
hdul[0].header.totextfile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_tofile(self, home_is_temp):
"""
Repeat test_totxtfile, but with tofile()
"""
header_filename = self.temp("header.fits")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.tofile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.tofile(header_filename, overwrite=False)
hdul[0].header.tofile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711"""
filename = self.data("scale.fits")
hdr = fits.Header.fromfile(filename)
assert hdr["DATASET"] == "2MASS"
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header["A"] = ("B", "C")
header["B"] = ("C", "D")
header["C"] = ("D", "E")
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
f.write("\nEND")
new_header = fits.Header.fromtextfile(self.temp("test.hdr"))
assert "END" not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, "END", "")
pytest.raises(ValueError, header.append, "END")
pytest.raises(ValueError, header.append, "END", end=True)
pytest.raises(ValueError, header.insert, len(header), "END")
pytest.raises(ValueError, header.set, "END")
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep="", endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += " " * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header("END =", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header("END = ", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header("END$%&%^*%*", True)
with pytest.warns(
AstropyUserWarning,
match=r"Unexpected bytes trailing END keyword: '\$%&%\^\*%\*'",
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header("END", False)
with pytest.warns(
AstropyUserWarning, match="Missing padding to end of the FITS block"
) as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h["FOO"] = "BAR"
h["COMMENT"] = "hello"
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
out = f.read()
out = out.replace(b"hello", "héllo".encode("latin1"))
out = out.replace(b"BAR", "BÀR".encode("latin1"))
with open(self.temp("test2.fits"), "wb") as f2:
f2.write(out)
with pytest.warns(
AstropyUserWarning,
match="non-ASCII characters are present in the FITS file",
) as w:
h = fits.getheader(self.temp("test2.fits"))
assert h["FOO"] == "B?R"
assert h["COMMENT"] == "h?llo"
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")])
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after=0)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before="C")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after="A")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set("C", before=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("C", after=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep="\n")
# First the case that *does* work prior to fixing this issue
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep="\n")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h["FOCALLEN"] = 155.0
h["APERTURE"] = 0.0
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header["TEST"] = 5.0022221e-07
hdu.writeto(self.temp("test.fits"))
# Here we manually make the file invalid
with open(self.temp("test.fits"), "rb+") as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii("e"))
with fits.open(self.temp("test.fits")) as hdul, pytest.warns(
AstropyUserWarning
) as w:
hdul.writeto(self.temp("temp.fits"), output_verify="warn")
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad("FOO = T")
barimg = _pad("BAR = F")
h = fits.Header()
h["FOO"] = True
h["BAR"] = False
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h["FOO"] = np.bool_(True)
h["BAR"] = np.bool_(False)
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)])
assert list(h) == ["ABC", "DEF", "GEH"]
assert "abc" in h
assert "dEf" in h
assert h["geh"] == 3
# Case insensitivity of wildcards
assert len(h["g*"]) == 1
h["aBc"] = 2
assert h["abc"] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h["gEh"]
assert list(h) == ["ABC", "DEF"]
assert len(h) == 2
assert h.get("def") == 2
h.set("Abc", 3)
assert h["ABC"] == 3
h.set("gEh", 3, before="Abc")
assert list(h) == ["GEH", "ABC", "DEF"]
assert h.pop("abC") == 3
assert len(h) == 2
assert h.setdefault("def", 3) == 2
assert len(h) == 2
assert h.setdefault("aBc", 1) == 1
assert len(h) == 3
assert list(h) == ["GEH", "DEF", "ABC"]
h.update({"GeH": 1, "iJk": 4})
assert len(h) == 4
assert list(h) == ["GEH", "DEF", "ABC", "IJK"]
assert h["GEH"] == 1
assert h.count("ijk") == 1
assert h.index("ijk") == 3
h.remove("Def")
assert len(h) == 3
assert list(h) == ["GEH", "ABC", "IJK"]
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header["TESTKW"] = ("Test val", "This is the END")
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
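# (36 cards x 80 bytes = 2880 bytes fills one FITS block exactly; the END card
# then pushes the header into a second 2880-byte block)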
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp("test.hdr"))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = "\u30a8\u30ea\u30c3\u30af"
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h["FOO"] = "BAR"
assert "FOO" in h
assert h["FOO"] == "BAR"
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, "BAR")
h["FOO"] = "BAZ"
assert h["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, "FOO", erikku)
h["FOO"] = ("BAR", "BAZ")
assert h["FOO"] == "BAR"
assert h.comments["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, "FOO", ("BAR", erikku))
pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ"))
pytest.raises(ValueError, assign, "FOO", (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
While test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, this test checks
that it is not possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set("TEST", b"Hello")
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h["FOO"] = "Bar "
assert h["FOO"] == "Bar"
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp("strip_header_whitespace", False):
assert h["FOO"] == "Bar "
assert h["QUX"] == "Bar "
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
assert h["FOO"] == "Bar"
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = [
"CCD parameters table ...",
" reference table oref$n951041ko_ccd.fits",
" INFLIGHT 12/07/2001 25/02/2002",
" all bias frames",
] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header["HISTORY"] = item
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"]
new_hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of bad cards.
c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30")
c2 = fits.Card.fromstring("Just some random text.")
c3 = fits.Card.fromstring("A" * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert "CLFIND2D" in header
assert "Just som" in header
assert "AAAAAAAA" in header
assert header["CLFIND2D"] == ": contour = 0.30"
assert header["Just som"] == "e random text."
assert header["AAAAAAAA"] == "A" * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, "CLFIND2D", "foo")
pytest.raises(ValueError, header.set, "Just som", "foo")
pytest.raises(ValueError, header.set, "AAAAAAAA", "foo")
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
c.verify("fix")
assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6")
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, "TEST", float("nan"))
pytest.raises(ValueError, h.set, "TEST", np.nan)
pytest.raises(ValueError, h.set, "TEST", np.float32("nan"))
pytest.raises(ValueError, h.set, "TEST", float("inf"))
pytest.raises(ValueError, h.set, "TEST", np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([("TEST", True)])
h["TEST"] = 1
assert h["TEST"] is not True
assert isinstance(h["TEST"], int)
assert h["TEST"] == 1
h["TEST"] = np.bool_(True)
assert h["TEST"] is True
h["TEST"] = False
assert h["TEST"] is False
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
h["TEST"] = 0
assert h["TEST"] is not False
assert isinstance(h["TEST"], int)
assert h["TEST"] == 0
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h["TEST"] = 1
# int -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# int -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# Now the same tests but with zeros
h["TEST"] = 0
# int -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
# int -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, "HISTORY", "\n")
pytest.raises(ValueError, h.set, "HISTORY", "\nabc")
pytest.raises(ValueError, h.set, "HISTORY", "abc\n")
pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef")
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 ",
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 ",
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 ",
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 ",
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif",
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use",
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' ",
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv ",
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1",
"HISTORY 1-04T16:59:14 ",
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if "\n" in card_image:
pytest.raises(fits.VerifyError, c.verify, "exception")
else:
c.verify("exception")
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = "abc" * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(("history", value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == "HISTORY" and val == value
# Try adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data("test0.fits"), "rb") as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data("test0.fits"))
assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"]
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr["KEY2 "] = 2
hdr["KEY2 "] = 4
assert len(hdr) == 1
assert hdr["KEY2"] == 4
assert hdr["KEY2 "] == 4
def test_strip(self):
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr.strip()
assert set(hdr) == {"HISTORY", "FOO"}
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr = hdr.copy(strip=True)
assert set(hdr) == {"HISTORY", "FOO"}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring("KW = INF / Comment")
card.value = "FIXED"
assert tuple(card) == ("KW", "FIXED", "Comment")
card.verify("fix")
assert tuple(card) == ("KW", "FIXED", "Comment")
card = fits.Card.fromstring("KW = INF")
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp("bogus.fits"))
with fits.open(self.temp("bogus.fits")) as hdul:
hdul[0].header["KW"] = -1
hdul.writeto(self.temp("bogus_fixed.fits"))
with fits.open(self.temp("bogus_fixed.fits")) as hdul:
assert hdul[0].header["KW"] == -1
def test_index_numpy_int(self):
header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")])
idx = np.int8(2)
assert header[idx] == "BAR"
header[idx] = "BAZ"
assert header[idx] == "BAZ"
header.insert(idx, ("D", 42))
assert header[idx] == 42
header.add_comment("HELLO")
header.add_comment("WORLD")
assert header["COMMENT"][np.int64(1)] == "WORLD"
header.append(("C", "BAZBAZ"))
assert header[("C", np.int16(0))] == "BAZ"
assert header[("C", np.uint32(1))] == "BAZBAZ"
def test_header_data_size(self):
"""
Tests data size calculation, with and without padding, given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header["BITPIX"] = 32
header["NAXIS"] = 2
header["NAXIS1"] = 100
header["NAXIS2"] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup_method(self):
super().setup_method()
self._test_header = fits.Header()
self._test_header.set("DP1", "NAXIS: 2")
self._test_header.set("DP1", "AXIS.1: 1")
self._test_header.set("DP1", "AXIS.2: 2")
self._test_header.set("DP1", "NAUX: 2")
self._test_header.set("DP1", "AUX.1.COEFF.0: 0")
self._test_header.set("DP1", "AUX.1.POWER.0: 1")
self._test_header.set("DP1", "AUX.1.COEFF.1: 0.00048828125")
self._test_header.set("DP1", "AUX.1.POWER.1: 1")
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
assert c.comment == "A comment"
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.1
assert c.field_specifier == "NAXIS"
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1", "NAXIS: 2")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: 2.0")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: a")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1.NAXIS", 2)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card("DP1.NAXIS", "a")
assert c.keyword == "DP1.NAXIS"
assert c.value == "a"
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.comment == "A comment"
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
c.field_specifier = "NAXIS1"
assert c.field_specifier == "NAXIS1"
assert c.keyword == "DP1.NAXIS1"
assert c.value == 2.0
assert c.comment == "A comment"
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set("abc.def", 1)
header.set("abc.DEF", 2)
assert header["abc.def"] == 1
assert header["ABC.def"] == 1
assert header["aBc.def"] == 1
assert header["ABC.DEF"] == 2
assert "ABC.dEf" not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header["DP1"] == "NAXIS: 2"
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header["DP1.NAXIS"] == 2.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
assert self._test_header["DP1.AUX.1.COEFF.1"] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header["DP1.AXIS.3"]
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header["DP1.NAXIS"] == 3.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
self._test_header["DP1.AXIS.1"] = 1.1
assert self._test_header["DP1.AXIS.1"] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h["D2IM1.EXTVER"] = 1
assert h["D2IM1.EXTVER"] == 1.0
h["D2IM1.EXTVER"] = 2
assert h["D2IM1.EXTVER"] == 2.0
def test_raw_keyword_value(self):
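# rawkeyword and rawvalue expose the keyword and value of the underlying
# physical card ('DP1' plus a 'NAXIS: ...' string) rather than the parsed
# 'DP1.NAXIS' view.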
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2"
c = fits.Card("DP1.NAXIS", 2)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set("DP1", "AXIS.3: 1", "a comment", after="DP1.AXIS.2")
assert self._test_header[3] == 1
assert self._test_header["DP1.AXIS.3"] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header["DP1.AXIS.1"]
assert len(self._test_header) == 7
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.AXIS.2"
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header["DP1.AXIS.2"]
assert len(self._test_header) == 6
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header["DP1.AXIS.*"]
assert isinstance(cl, fits.Header)
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
cl = self._test_header["DP1.N*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'",
]
cl = self._test_header["DP1.AUX..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl = self._test_header["DP?.NAXIS"]
assert [str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"]
cl = self._test_header["DP1.A*S.*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header["DP1.A*..."]
assert len(self._test_header) == 2
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header["DP1.A*..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl2 = cl["*.*AUX..."]
assert [str(c).strip() for c in cl2.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl) == ["DP1.AXIS.1", "DP1.AXIS.2"]
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header["DP1.AXIS.*"]
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h["HISTORY"] = "AXIS.1: 2"
h["HISTORY"] = "AXIS.2: 2"
assert "HISTORY.AXIS" not in h
assert "HISTORY.AXIS.1" not in h
assert "HISTORY.AXIS.2" not in h
assert h["HISTORY"] == ["AXIS.1: 2", "AXIS.2: 2"]
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h["HISTORY"] = "Date: 2012-09-19T13:58:53.756061"
assert "HISTORY.Date" not in h
assert str(h.cards[0]) == _pad("HISTORY Date: 2012-09-19T13:58:53.756061")
c = fits.Card.fromstring(" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ""
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h["FOO"] = "Date: 2012-09-19T13:58:53.756061"
assert "FOO.Date" not in h
assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h["FOO"] == "AXIS.1: 2"
assert h[("FOO", 1)] == "AXIS.2: 4"
assert h["FOO.AXIS.1"] == 2.0
assert h["FOO.AXIS.2"] == 4.0
assert "FOO.AXIS" not in h
assert "FOO.AXIS." not in h
assert "FOO." not in h
pytest.raises(KeyError, lambda: h["FOO.AXIS"])
pytest.raises(KeyError, lambda: h["FOO.AXIS."])
pytest.raises(KeyError, lambda: h["FOO."])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data("zerowidth.fits"))
output = hf.parse(extensions=["AIPS FQ"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split("\n")) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1], keywords=["EXTNAME", "BITPIX"])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split("\n")) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=["NAXIS*"])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data("test0.fits"))
assert "EXTNAME = 'SCI" in hf.parse(extensions=["SCI,2"])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data("comp.fits"))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True)
hf.close()
def test_fitsheader_compressed_from_primary_image_ext(self):
"""Regression test for issue https://github.com/astropy/astropy/issues/7312"""
data = np.arange(2 * 2, dtype=np.int8).reshape((2, 2))
phdu = fits.PrimaryHDU(data=data)
chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header)
chdu.writeto(self.temp("tmp2.fits"), overwrite=True)
with fits.open(self.temp("tmp2.fits")) as hdul:
assert "XTENSION" not in hdul[1].header
assert "PCOUNT" not in hdul[1].header
assert "GCOUNT" not in hdul[1].header
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data("zerowidth.fits")
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=["AIPS FQ", 2, "4"])
assert len(mytable) == (
len(fitsobj["AIPS FQ"].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header)
)
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=["AIPS FQ"])
assert np.all(mytable["filename"] == test_filename)
assert np.all(mytable["hdu"] == "AIPS FQ")
assert mytable["value"][mytable["keyword"] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert len(mytable) == 1
assert mytable["hdu"][0] == "AIPS FQ"
assert mytable["keyword"][0] == "EXTNAME"
assert mytable["value"][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=["DOES_NOT_EXIST"])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["DOES_NOT_EXIST"])
assert mytable is None
formatter.close()
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_hdu_writeto_mode(self, mode):
with open(self.temp("mode.fits"), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ("no comment",)
return super().append(card, *args, **kwargs)
my_header = MyHeader(
(
("a", 1.0, "first"),
("b", 2.0, "second"),
(
"c",
3.0,
),
)
)
assert my_header.comments["a"] == "first"
assert my_header.comments["b"] == "second"
assert my_header.comments["c"] == "no comment"
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments["b"] == "second"
assert slice_.comments["c"] == "no comment"
selection = my_header["c*"]
assert type(selection) is MyHeader
assert selection.comments["c"] == "no comment"
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments["b"] == "second"
assert copy_.comments["c"] == "no comment"
my_header.extend((("d", 4.0),))
assert my_header.comments["d"] == "no comment"
|
c849163b4875804e43d1383efb65c8486fbeb4ed29bff37d034955d541167627 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits._tiled_compression import compress_image_data
from .conftest import FitsTestCase
MAX_INT = np.iinfo(np.intc).max
MAX_LONG = np.iinfo(int).max
MAX_LONGLONG = np.iinfo(np.longlong).max
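# Upper bounds of the numpy/C integer types (int, long, long long); the
# overflow tests below set header values just past these limits and expect
# OverflowError to be raised.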
class TestCompressionFunction(FitsTestCase):
def test_wrong_argument_number(self):
with pytest.raises(TypeError):
compress_image_data(1, 2)
def test_unknown_compression_type(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header["ZCMPTYPE"] = "fun"
with pytest.raises(ValueError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert "Unrecognized compression type: fun" in str(exc.value)
def test_zbitpix_unknown(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header["ZBITPIX"] = 13
with pytest.raises(ValueError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert "Invalid value for BITPIX: 13" in str(exc.value)
def test_data_none(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu.data = None
with pytest.raises(TypeError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert "Image data must be a numpy.ndarray" in str(exc.value)
def test_missing_internal_header(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
del hdu._header
with pytest.raises(AttributeError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert "_header" in str(exc.value)
def test_invalid_tform(self):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header["TFORM1"] = "TX"
with pytest.raises(RuntimeError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert "TX" in str(exc.value) and "TFORM" in str(exc.value)
def test_invalid_zdither(self):
hdu = fits.CompImageHDU(np.ones((10, 10)), quantize_method=1)
hdu._header["ZDITHER0"] = "a"
with pytest.raises(TypeError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["ZNAXIS", "ZBITPIX"])
def test_header_missing_keyword(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
del hdu._header[kw]
with pytest.raises(KeyError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert kw in str(exc.value)
@pytest.mark.parametrize("kw", ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"])
def test_header_value_int_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_INT + 1
with pytest.raises(OverflowError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["ZTILE1", "ZNAXIS1"])
def test_header_value_long_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_LONG + 1
with pytest.raises(OverflowError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TNULL1", "PCOUNT", "THEAP"])
def test_header_value_longlong_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = MAX_LONGLONG + 1
with pytest.raises(OverflowError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["ZVAL3"])
def test_header_value_float_overflow(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = 1e300
with pytest.raises(OverflowError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TFIELDS", "PCOUNT"])
def test_header_value_negative(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = -1
with pytest.raises(ValueError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert f"{kw} should not be negative." in str(exc.value)
@pytest.mark.parametrize(("kw", "limit"), [("ZNAXIS", 999), ("TFIELDS", 999)])
def test_header_value_exceeds_custom_limit(self, kw, limit):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = limit + 1
with pytest.raises(ValueError) as exc:
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
assert kw in str(exc.value)
@pytest.mark.parametrize(
"kw", ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]
)
def test_header_value_no_string(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = 1
with pytest.raises(TypeError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["TZERO1", "TSCAL1"])
def test_header_value_no_double(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10)))
hdu._header[kw] = "1"
with pytest.raises(TypeError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
@pytest.mark.parametrize("kw", ["ZSCALE", "ZZERO"])
def test_header_value_no_double_int_image(self, kw):
hdu = fits.CompImageHDU(np.ones((10, 10), dtype=np.int32))
hdu._header[kw] = "1"
with pytest.raises(TypeError):
compress_image_data(
hdu.data, hdu.compression_type, hdu._header, hdu.columns
)
|
e2eb8924dd0cc9a33aabe6da11ecdd8c26e5f15f574a66d6a55784f0e51c9c2e | import gc
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.io import fits
from astropy.io.fits import (
BinTableHDU,
HDUList,
ImageHDU,
PrimaryHDU,
connect,
table_to_hdu,
)
from astropy.io.fits.column import (
_fortran_to_python_format,
_parse_tdisp_format,
python_to_tdisp,
)
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
from astropy.table import Column, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ["value", "shape", "format", "scale", "location"]
compare_attrs = {
name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()
}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {
name: col
for name, col in mixin_cols.items()
if (
isinstance(col, Time)
and col.location.shape != ()
or isinstance(col, np.ndarray)
and col.dtype.kind == "O"
or isinstance(col, u.LogQuantity)
)
}
mixin_cols = {
name: col for name, col in mixin_cols.items() if name not in unsupported_cols
}
def equal_data(a, b):
return all(np.all(a[name] == b[name]) for name in a.dtype.names)
class TestSingleTable:
def setup_class(self):
self.data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
def test_simple(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmp_path):
filename = tmp_path / "test_simple.fit"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["A"] = 1
t1.meta["B"] = 2.3
t1.meta["C"] = "spam"
t1.meta["comments"] = ["this", "is", "a", "long", "comment"]
t1.meta["HISTORY"] = ["first", "second", "third"]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
            if isinstance(t1.meta[key], list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["ttype1"] = "spam"
with pytest.warns(
AstropyUserWarning,
match=(
"Meta-data keyword ttype1 "
"will be ignored since it conflicts with a FITS "
"reserved keyword"
),
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmp_path):
"""
Test that file type is recognized without extension
"""
filename = tmp_path / "test_simple"
t1 = Table(self.data)
t1.write(filename, overwrite=True, format="fits")
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_units(self, table_type, tmp_path):
filename = tmp_path / "test_with_units.fits"
t1 = table_type(self.data)
t1["a"].unit = u.m
t1["c"].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].unit == u.m
assert t2["c"].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmp_path):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = tmp_path / "test_with_units.fits"
unit = u.def_unit("bandpass_sol_lum")
t = QTable()
t["l"] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert "bandpass_sol_lum" in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(
u.UnitsWarning, match="'bandpass_sol_lum' did not parse"
) as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2["l"].unit, u.UnrecognizedUnit)
assert str(t2["l"].unit) == "bandpass_sol_lum"
assert np.all(t2["l"].value == t["l"].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3["l"].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({"bandpass_sol_lum": u.Lsun}):
t3 = QTable.read(filename)
assert t3["l"].unit is u.Lsun
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "Angstroms"
hdu.columns[2].unit = "ergs/(cm.s.Angstroms)"
with u.set_enabled_aliases({"Angstroms": u.AA, "ergs": u.erg}):
t = table_type.read(hdu)
assert t["a"].unit == u.AA
assert t["c"].unit == u.erg / (u.cm * u.s * u.AA)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_format(self, table_type, tmp_path):
filename = tmp_path / "test_with_format.fits"
t1 = table_type(self.data)
t1["a"].format = "{:5d}"
t1["b"].format = "{:>20}"
t1["c"].format = "{:6.2f}"
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].format == "{:5d}"
assert t2["b"].format == "{:>20}"
assert t2["c"].format == "{:6.2f}"
def test_masked(self, tmp_path):
filename = tmp_path / "test_masked.fits"
t1 = Table(self.data, masked=True)
t1.mask["a"] = [1, 0, 1, 0]
t1.mask["b"] = [1, 0, 0, 1]
t1.mask["c"] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
@pytest.mark.parametrize("masked", [True, False])
def test_masked_nan(self, masked, tmp_path):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype="f4")
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=["a", "b", "c"], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2["b"].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_masked_serialize_data_mask(self, tmp_path):
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=["a", "b", "c"])
        t1.write(filename, overwrite=True, serialize_method="data_mask")
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2["b"].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_read_from_fileobj(self, tmp_path):
filename = tmp_path / "test_read_from_fileobj.fits"
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, "rb") as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "RADIANS"
hdu.columns[1].unit = "spam"
hdu.columns[2].unit = "millieggs"
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmp_path):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = tmp_path / "test_nonstandard_units.fits"
spam = u.def_unit("spam")
t = table_type()
t["a"] = [1.0, 2.0, 3.0] * spam
with pytest.warns(AstropyUserWarning, match="spam") as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert "cannot be recovered in reading. " in str(w[0].message)
else:
assert "lost to non-astropy fits readers" in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert "TUNIT1" not in hdu.header
def test_memmap(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize("memmap", (False, True))
def test_character_as_bytes(self, tmp_path, memmap):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2["b"].dtype.kind == "U"
assert t3["b"].dtype.kind == "S"
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmp_path):
filename = tmp_path / "test_oned_single_element.fits"
table = Table({"x": [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read["x"].shape == (2, 1)
assert len(read["x"][0]) == 1
def test_write_append(self, tmp_path):
t = Table(self.data)
hdu = table_to_hdu(t)
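        # Helper: the file should contain `expected` HDUs in total, and every table
        # HDU from index `start_from` onwards should match the HDU written above.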
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = tmp_path / "test_write_append.fits"
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmp_path):
t = Table(self.data)
filename = tmp_path / "test_write_overwrite.fits"
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmp_path):
filename = tmp_path / "test_inexact_format_parse_on_read.fits"
c1 = fits.Column(name="a", array=np.array([1, 2, np.nan]), format="E")
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
        # using memmap also deactivates the masking
tab = Table.read(filename, memmap=True)
assert tab.mask is None
def test_mask_null_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a",
array=np.array([1, 2, 99, 60000], dtype="u2"),
format="I",
null=99,
bzero=32768,
)
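        # null=99 sets TNULL, the sentinel value that should come back masked, while
        # bzero=32768 stores the unsigned 16-bit data in a signed column per the
        # usual FITS convention for unsigned integers.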
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_str_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a", array=np.array([b"foo", b"bar", b""], dtype="|S3"), format="A3"
)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
def test_heterogeneous_VLA_tables(self, tmp_path):
"""
Check the behaviour of heterogeneous VLA object.
"""
filename = tmp_path / "test_table_object.fits"
msg = "Column 'col1' contains unsupported object types or mixed types: "
        # The column format fixes the type of the arrays in the VLF object.
a = np.array([45, 30])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
tab = Table({"col1": var})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
# Strings in the VLF object can't be added to the table
a = np.array(["five", "thirty"])
b = np.array([11.0, 12.0, 13])
        var = np.array([a, b], dtype=object)
        tab = Table({"col1": var})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
def test_write_object_tables_with_unified(self, tmp_path):
"""
Write objects with the unified I/O interface.
See https://github.com/astropy/astropy/issues/1906
"""
filename = tmp_path / "test_table_object.fits"
msg = r"Column 'col1' contains unsupported object types or mixed types: {dtype\('O'\)}"
# Make a FITS table with an object column
tab = Table({"col1": [None]})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
def test_write_VLA_tables_with_unified(self, tmp_path):
"""
Write VLA objects with the unified I/O interface.
See https://github.com/astropy/astropy/issues/11323
"""
filename = tmp_path / "test_table_VLA.fits"
# Make a FITS table with a variable-length array column
a = np.array([45, 30])
b = np.array([11, 12, 13])
c = np.array([45, 55, 65, 75])
var = np.array([a, b, c], dtype=object)
tabw = Table({"col1": var})
tabw.write(filename)
tab = Table.read(filename)
assert np.array_equal(tab[0]["col1"], np.array([45, 30]))
assert np.array_equal(tab[1]["col1"], np.array([11, 12, 13]))
assert np.array_equal(tab[2]["col1"], np.array([45, 55, 65, 75]))
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
self.data2 = np.array(
list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])),
dtype=[("p", float), ("q", float)],
)
self.data3 = np.array(
list(zip([1, 2, 3, 4], [2.3, 4.5, 6.7, 8.9])),
dtype=[("A", int), ("B", float)],
)
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name="first")
hdu2 = BinTableHDU(self.data2, name="second")
hdu3 = ImageHDU(np.ones((3, 3)), name="third")
hdu4 = BinTableHDU(self.data3)
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
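        # Variants used below: `hdusb` puts the image before the tables (first
        # table at index 2), `hdus3` has its only table at index 2, `hdus2` has a
        # single table plus an image, and `hdus1` holds just one table.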
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings("always")
def test_read(self, tmp_path):
filename = tmp_path / "test_read.fits"
self.hdus.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = tmp_path / "test_read_2.fits"
self.hdusb.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_0.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_with_hdu_1(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_1.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_with_hdu_2(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_2.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_with_hdu_3(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_3.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_4.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_with_hdu_missing(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_1.fits"
self.hdus1.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_with_hdu_warning(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_2.fits"
self.hdus2.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_in_last_hdu(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_3.fits"
self.hdus3.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first", None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize("hdu", [None, 1, "first"])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename("data/tb.fits"))
assert np.all(t["c1"].mask == np.array([False, False]))
assert not hasattr(t["c2"], "mask")
assert not hasattr(t["c3"], "mask")
assert not hasattr(t["c4"], "mask")
assert np.all(t["c1"].data == np.array([1, 2]))
assert np.all(t["c2"].data == np.array([b"abc", b"xy "]))
assert_allclose(t["c3"].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t["c4"].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ["x", "y", "z"]
t = Table([a, b, c], names=("a", "b", "c"), meta={"name": "first table"})
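    # A purely numerical "unit" is just a scale factor, which has no FITS
    # representation, so writing the table should raise UnitScaleError.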
t["a"].unit = "1.2"
with pytest.raises(
UnitScaleError,
match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\.",
):
t.write("t.fits", format="fits", overwrite=True)
@pytest.mark.parametrize(
"tdisp_str, format_return",
[
("EN10.5", ("EN", "10", "5", None)),
("F6.2", ("F", "6", "2", None)),
("B5.10", ("B", "5", "10", None)),
("E10.5E3", ("E", "10", "5", "3")),
("A21", ("A", "21", None, None)),
],
)
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize(
"tdisp_str, format_str_return",
[
("G15.4E2", "{:15.4g}"),
("Z5.10", "{:5x}"),
("I6.5", "{:6d}"),
("L8", "{:>8}"),
("E20.7", "{:20.7e}"),
],
)
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize(
"fmt_str, tdisp_str",
[
("{:3d}", "I3"),
("3d", "I3"),
("7.3f", "F7.3"),
("{:>4}", "A4"),
("{:7.4f}", "F7.4"),
("%5.3g", "G5.3"),
("%10s", "A10"),
("%.4f", "F13.4"),
],
)
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp("{:>7}", logical_dtype=True) == "L7"
def test_bool_column(tmp_path):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
arr[::2] = False
t = Table([arr])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert hdul[1].data["col0"].dtype == np.dtype("bool")
assert np.all(hdul[1].data["col0"] == arr)
def test_unicode_column(tmp_path):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(["a", "b", "cd"])])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert np.all(hdul[1].data["col0"] == ["a", "b", "cd"])
assert hdul[1].header["TFORM1"] == "2A"
t2 = Table([np.array(["\N{SNOWMAN}"])])
with pytest.raises(UnicodeEncodeError):
t2.write(tmp_path / "test.fits", overwrite=True)
def test_unit_warnings_read_write(tmp_path):
filename = tmp_path / "test_unit.fits"
t1 = Table([[1, 2], [3, 4]], names=["a", "b"])
t1["a"].unit = "m/s"
t1["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention():
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename("data/stddata.fits")
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables are present",
):
t = Table.read(filename)
assert t.meta["comments"] == [
"",
" *** End of mandatory fields ***",
"",
"",
" *** Column names ***",
"",
"",
" *** Column formats ***",
"",
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would
# fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# FITS does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting="safe")
else:
assert np.all(a1 == a2)
def test_fits_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="fits")
t2 = Table.read(filename, format="fits", astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time) else serialized_names[name]
)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["HISTORY"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.fits"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my \n\n\n description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize("name_col", unsupported_cols.items())
@pytest.mark.xfail(reason="column type unsupported")
def test_fits_unsupported_mixin(name_col, tmp_path):
    # Check that we actually fail when writing the unsupported columns defined
    # above.
filename = tmp_path / "test_simple.fits"
name, col = name_col
Table([col], names=[name]).write(filename, format="fits")
def test_info_attributes_with_no_mixins(tmp_path):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = tmp_path / "test.fits"
t = Table([[1.0, 2.0]])
t["col0"].description = "hello" * 40
t["col0"].format = "{:8.4f}"
t["col0"].meta["a"] = {"b": "c"}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2["col0"].description == "hello" * 40
assert t2["col0"].format == "{:8.4f}"
assert t2["col0"].meta["a"] == {"b": "c"}
@pytest.mark.parametrize("method", ["set_cols", "names", "class"])
def test_round_trip_masked_table_serialize_mask(tmp_path, method):
"""
    Set the serialize_method to 'data_mask' so the mask is written out and the
    data under the mask round-trips correctly.
"""
filename = tmp_path / "test.fits"
t = simple_table(masked=True) # int, float, and str cols with one masked element
    # MaskedColumn but no masked elements. See the MaskedColumnInfo class
    # _represent_as_dict() method for info about why we test a column with no masked elements.
t["d"] = [1, 2, 3]
if method == "set_cols":
for col in t.itercols():
col.info.serialize_method["fits"] = "data_mask"
t.write(filename)
elif method == "names":
t.write(
filename,
serialize_method={
"a": "data_mask",
"b": "data_mask",
"c": "data_mask",
"d": "data_mask",
},
)
elif method == "class":
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
def test_meta_not_modified(tmp_path):
filename = tmp_path / "test.fits"
t = Table(data=[Column([1, 2], "a", description="spam")])
t.meta["comments"] = ["a", "b"]
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta["comments"] == ["a", "b"]
def test_is_fits_gh_14305():
"""Regression test for https://github.com/astropy/astropy/issues/14305"""
assert not connect.is_fits("", "foo.bar", None)
|
578d2ba65484e4c680d7312ea7476a1dfec10ad2f823607a0566f424f73fdfab | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import errno
import gzip
import io
import mmap
import os
import pathlib
import shutil
import sys
import urllib.request
import zipfile
from unittest.mock import patch
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.convenience import _getext
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.file import GZIP_MAGIC, _File
from astropy.io.tests import safeio
from astropy.utils import data
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import conf
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
if HAS_BZ2:
import bz2
class TestCore(FitsTestCase):
def test_missing_file(self):
with pytest.raises(OSError):
fits.open(self.temp("does-not-exist.fits"))
def test_naxisj_check(self):
with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist:
hdulist[1].header["NAXIS3"] = 500
assert "NAXIS3" in hdulist[1].header
hdulist.verify("silentfix")
assert "NAXIS3" not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
lst = fits.HDUList()
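        # Unsigned 16-bit values are stored in a signed i2 column with bzero=32768,
        # the FITS convention for unsigned integers; 60000 should round-trip once
        # the scaling is applied on read.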
n = np.array([1, 60000, 0], dtype="u2").astype("i2")
c = fits.Column(name="foo", format="i2", bscale=1, bzero=32768, array=n)
t = fits.BinTableHDU.from_columns([c])
lst.append(p)
lst.append(t)
lst.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as p:
assert p[1].data[1]["foo"] == 60000.0
def test_fits_file_path_object(self):
"""
Testing when fits file is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(self.data("tdim.fits"))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_pathlike_object(self):
"""
Testing when fits file is passed as os.PathLike object #11579.
"""
class TPath(os.PathLike):
def __init__(self, path):
self.path = path
def __fspath__(self):
return str(self.path)
fpath = TPath(self.data("tdim.fits"))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_fits_file_bytes_object(self):
"""
Testing when fits file is passed as bytes.
"""
with fits.open(self.data("tdim.fits").encode()) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data("tdim.fits")) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name="FOO", format="3J"))
p.add_col(fits.Column(name="BAR", format="1I"))
assert p.names == ["FOO", "BAR"]
p.del_col("FOO")
assert p.names == ["BAR"]
def test_add_del_columns2(self):
hdulist = fits.open(self.data("tb.fits"))
table = hdulist[1]
assert table.data.dtype.names == ("c1", "c2", "c3", "c4")
assert table.columns.names == ["c1", "c2", "c3", "c4"]
table.columns.del_col("c1")
assert table.data.dtype.names == ("c2", "c3", "c4")
assert table.columns.names == ["c2", "c3", "c4"]
table.columns.del_col("c3")
assert table.data.dtype.names == ("c2", "c4")
assert table.columns.names == ["c2", "c4"]
table.columns.add_col(fits.Column("foo", "3J"))
assert table.data.dtype.names == ("c2", "c4", "foo")
assert table.columns.names == ["c2", "c4", "foo"]
hdulist.writeto(self.temp("test.fits"), overwrite=True)
hdulist.close()
        # NOTE: If you see a warning, it might be related to
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp("test.fits")) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ("c2", "c4", "foo")
assert table.columns.names == ["c2", "c4", "foo"]
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = "number of bits per data pixel"
header["BITPIX"] = (16, comment)
assert "BITPIX" in header
assert header["BITPIX"] == 16
assert header.comments["BITPIX"] == comment
header.update(BITPIX=32)
assert header["BITPIX"] == 32
assert header.comments["BITPIX"] == ""
def test_set_card_value(self):
"""Similar to test_update_header_card(), but tests the the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = "number of bits per data pixel"
card = fits.Card.fromstring(f"BITPIX = 32 / {comment}")
header.append(card)
header["BITPIX"] = 32
assert "BITPIX" in header
assert header["BITPIX"] == 32
assert header.cards[0].keyword == "BITPIX"
assert header.cards[0].value == 32
assert header.cards[0].comment == comment
def test_uint(self):
filename = self.data("o4sp040b0_raw.fits")
with fits.open(filename, uint=False) as hdulist_f:
with fits.open(filename, uint=True) as hdulist_i:
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
errs = hdu.req_cards("TESTKW", None, None, "foo", "silentfix", [])
assert len(errs) == 1
assert "TESTKW" in hdu.header
assert hdu.header["TESTKW"] == "foo"
assert hdu.header.cards[-1].keyword == "TESTKW"
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header["TESTKW"] = "foo"
errs = hdu.req_cards("TESTKW", None, lambda v: v == "foo", "foo", "ignore", [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards(
"TESTKW", None, lambda v: v == "bar", "bar", "exception", []
)
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards("TESTKW", None, lambda v: v == "bar", "bar", "silentfix", [])
assert hdu.header["TESTKW"] == "bar"
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option="warn"):
errs = super()._verify(option)
hdu.req_cards("TESTKW", None, None, None, "fix", errs)
return errs
@classmethod
def match_header(cls, header):
# Since creating this HDU class adds it to the registry we
# don't want the file reader to possibly think any actual
# HDU from a file should be handled by this class
return False
hdu = TestHDU(header=fits.Header())
with pytest.raises(fits.VerifyError):
hdu.verify("fix")
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header["XTENSION"]
with pytest.raises(fits.VerifyError):
hdu.verify("exception")
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header["NAXIS"]
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
hdu.verify("ignore")
# Make sure the error wasn't fixed either, silently or otherwise
assert "NAXIS" not in hdu.header
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
with pytest.raises(ValueError):
hdu.verify("foobarbaz")
def test_errlist_basic(self):
# Just some tests to make sure that _ErrList is setup correctly.
# No arguments
error_list = fits.verify._ErrList()
assert error_list == []
        # Some contents - the values are not actually validated, this just makes
        # sure they are kept.
error_list = fits.verify._ErrList([1, 2, 3])
assert error_list == [1, 2, 3]
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
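            # 'test' is fixable (keyword not upper case); 'P.I.' is unfixable (the
            # keyword name contains illegal characters), matching the warnings and
            # errors asserted below.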
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
hdu.verify("silentfix+ignore")
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match="Illegal keyword name") as w:
hdu.verify("silentfix+warn")
assert len(w) == 4
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo:
hdu.verify("silentfix+exception")
assert "not upper case" not in str(excinfo.value)
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning, match="not upper case") as w:
hdu.verify("fix+ignore")
assert len(w) == 4
# fix+warn
hdu = make_invalid_hdu()
with pytest.warns(AstropyUserWarning) as w:
hdu.verify("fix+warn")
assert len(w) == 6
assert "not upper case" in str(w[2].message)
assert "Illegal keyword name" in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo:
hdu.verify("fix+exception")
assert "not upper case" in str(excinfo.value)
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
filename = self.data("test0.fits")
hl, ext = _getext(filename, "readonly", 1)
assert ext == 1
hl.close()
pytest.raises(ValueError, _getext, filename, "readonly", 1, 2)
pytest.raises(ValueError, _getext, filename, "readonly", (1, 2))
pytest.raises(ValueError, _getext, filename, "readonly", "sci", "sci")
pytest.raises(TypeError, _getext, filename, "readonly", 1, 2, 3)
hl, ext = _getext(filename, "readonly", ext=1)
assert ext == 1
hl.close()
hl, ext = _getext(filename, "readonly", ext=("sci", 2))
assert ext == ("sci", 2)
hl.close()
pytest.raises(
TypeError, _getext, filename, "readonly", 1, ext=("sci", 2), extver=3
)
pytest.raises(
TypeError, _getext, filename, "readonly", ext=("sci", 2), extver=3
)
hl, ext = _getext(filename, "readonly", "sci")
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", "sci", 1)
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", ("sci", 1))
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(
filename, "readonly", "sci", extver=1, do_not_scale_image_data=True
)
assert ext == ("sci", 1)
hl.close()
pytest.raises(TypeError, _getext, filename, "readonly", "sci", ext=1)
pytest.raises(TypeError, _getext, filename, "readonly", "sci", 1, extver=2)
hl, ext = _getext(filename, "readonly", extname="sci")
assert ext == ("sci", 1)
hl.close()
hl, ext = _getext(filename, "readonly", extname="sci", extver=1)
assert ext == ("sci", 1)
hl.close()
pytest.raises(TypeError, _getext, filename, "readonly", extver=1)
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.conf.extension_name_case_sensitive at
runtime works.
"""
hdu = fits.ImageHDU()
hdu.name = "sCi"
assert hdu.name == "SCI"
assert hdu.header["EXTNAME"] == "SCI"
with fits.conf.set_temp("extension_name_case_sensitive", True):
hdu = fits.ImageHDU()
hdu.name = "sCi"
assert hdu.name == "sCi"
assert hdu.header["EXTNAME"] == "sCi"
hdu.name = "sCi"
assert hdu.name == "SCI"
assert hdu.header["EXTNAME"] == "SCI"
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
infile = self.data("test0.fits")
outfile = self.temp("test.fits")
with open(infile, "rb") as fin:
dat = fin.read()
offset = 0
with fits.open(infile) as hdul:
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header["TEST"] = "TEST"
hdu.writeto(outfile)
with fits.open(outfile) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header["TEST"] == "TEST"
assert hdu.data is None
with fits.open(infile) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset : offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header["SIMPLE"] = False
hdu.writeto(self.temp("test.fits"))
info = [(0, "", 1, "NonstandardHDU", 5, (), "", "")]
with fits.open(self.temp("test.fits")) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == "PRIMARY"
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert "EXTNAME" not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = "NOTREAL"
assert h1.name == "NOTREAL"
assert h1.header.get("EXTNAME") == "NOTREAL"
# Updating the EXTNAME in the header should update the .name
h1.header["EXTNAME"] = "TOOREAL"
assert h1.name == "TOOREAL"
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header["EXTNAME"]
assert h1.name == "PRIMARY"
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ""
assert "EXTNAME" not in h2.header
h2.name = "HELLO"
assert h2.name == "HELLO"
assert h2.header.get("EXTNAME") == "HELLO"
h2.header["EXTNAME"] = "GOODBYE"
assert h2.name == "GOODBYE"
def test_extver_extlevel(self):
"""Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
# EXTVER and EXTNAME work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert "EXTVER" not in h1.header
assert "EXTLEVEL" not in h1.header
h1.ver = 2
assert h1.header.get("EXTVER") == 2
h1.header["EXTVER"] = 3
assert h1.ver == 3
del h1.header["EXTVER"]
assert h1.ver == 1
h1.level = 2
assert h1.header.get("EXTLEVEL") == 2
h1.header["EXTLEVEL"] = 3
assert h1.level == 3
del h1.header["EXTLEVEL"]
assert h1.level == 1
pytest.raises(TypeError, setattr, h1, "ver", "FOO")
pytest.raises(TypeError, setattr, h1, "level", "BAR")
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
        particular instance of this problem, though it isn't unique to sys.stdout.
"""
with fits.open(self.data("test0.fits")) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header[f"TEST{idx}"] = "test"
hdul1.writeto(self.temp("test1.fits"))
hdul1.writeto(self.temp("test2.fits"))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data("test0.fits")) as hdul2:
with fits.open(self.temp("test2.fits")) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul2, hdul)):
hdu2, hdu = hdus
if idx != 1:
assert hdu.header == hdu2.header
else:
assert hdu2.header == hdu.header[: len(hdu2.header)]
assert np.all(hdu.data == hdu2.data)
class TestConvenienceFunctions(FitsTestCase):
def test_writeto(self, home_is_temp):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
header.set("CRPIX1", 1.0)
fits.writeto(
filename, data, header=header, overwrite=True, output_verify="silentfix"
)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert "CRPIX1" in hdul[0].header
assert hdul[0].header["CRPIX1"] == 1.0
def test_writeto_overwrite(self, home_is_temp):
"""
Ensure the `overwrite` keyword works as it should
"""
filename = self.temp("array.fits")
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
fits.writeto(filename, data, header=header, overwrite=False)
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
class TestFileFunctions(FitsTestCase):
"""
Tests various basic I/O operations, specifically in the
astropy.io.fits.file._File class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
OSError (and not some other arbitrary exception).
"""
with pytest.raises(OSError, match=r"No such file or directory"):
fits.open(self.temp("foobar.fits"))
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ("ostream", "append"):
with fits.open(self.temp("foobar.fits"), mode=mode) as _:
pass
assert os.path.exists(self.temp("foobar.fits"))
os.remove(self.temp("foobar.fits"))
def test_open_file_handle(self):
# Make sure we can open a FITS file from an open file handle
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle) as _:
pass
with open(self.temp("temp.fits"), "wb") as handle:
with fits.open(handle, mode="ostream") as _:
pass
# Opening without explicitly specifying binary mode should fail
with pytest.raises(ValueError):
with open(self.data("test0.fits")) as handle:
with fits.open(handle) as _:
pass
# All of these read modes should fail
for mode in ["r", "rt"]:
with pytest.raises(ValueError):
with open(self.data("test0.fits"), mode=mode) as handle:
with fits.open(handle) as _:
pass
# These update or write modes should fail as well
for mode in ["w", "wt", "w+", "wt+", "r+", "rt+", "a", "at", "a+", "at+"]:
with pytest.raises(ValueError):
with open(self.temp("temp.fits"), mode=mode) as handle:
with fits.open(handle) as _:
pass
def test_fits_file_handle_mode_combo(self):
# This should work fine since no mode is given
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle) as _:
pass
# This should work fine since the modes are compatible
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle, mode="readonly") as _:
pass
# This should not work since the modes conflict
with pytest.raises(ValueError):
with open(self.data("test0.fits"), "rb") as handle:
with fits.open(handle, mode="ostream") as _:
pass
def test_open_from_url(self):
file_url = "file:///" + self.data("test0.fits").lstrip("/")
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj) as _:
pass
# It will not be possible to write to a file that is from a URL object
for mode in ("ostream", "append", "update"):
with pytest.raises(ValueError):
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj, mode=mode) as _:
pass
@pytest.mark.remote_data(source="astropy")
def test_open_from_remote_url(self):
for dataurl in (conf.dataurl, conf.dataurl_mirror):
remote_url = f"{dataurl}/allsky/allsky_rosat.fits"
try:
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj) as fits_handle:
assert len(fits_handle) == 1
for mode in ("ostream", "append", "update"):
with pytest.raises(ValueError):
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
assert len(fits_handle) == 1
except (urllib.error.HTTPError, urllib.error.URLError):
continue
else:
break
else:
raise Exception("Could not download file")
def test_open_gzipped(self):
gzip_file = self._make_gzip_file()
with fits.open(gzip_file) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
with fits.open(gzip.GzipFile(gzip_file)) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
def test_open_gzipped_from_handle(self):
with open(self._make_gzip_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "gzip"
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with fits.open(self._make_gzip_file("test0.fz")) as fits_handle:
assert fits_handle._file.compression == "gzip"
assert len(fits_handle) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp("test.fits.gz"), "ab+")
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp("test.fits.gz")) as hdul:
assert hdul[0].header == h.header
def test_fits_update_mode_gzip(self):
"""Test updating a GZipped FITS file"""
with fits.open(self._make_gzip_file("update.gz"), mode="update") as fits_handle:
hdu = fits.ImageHDU(data=list(range(100)))
fits_handle.append(hdu)
with fits.open(self.temp("update.gz")) as new_handle:
assert len(new_handle) == 6
assert (new_handle[-1].data == list(range(100))).all()
def test_fits_append_mode_gzip(self):
"""Make sure that attempting to open an existing GZipped FITS file in
'append' mode raises an error"""
with pytest.raises(OSError):
with fits.open(self._make_gzip_file("append.gz"), mode="append") as _:
pass
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_open_bzipped(self):
bzip_file = self._make_bzip2_file()
with fits.open(bzip_file) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
with fits.open(bz2.BZ2File(bzip_file)) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_open_bzipped_from_handle(self):
with open(self._make_bzip2_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_detect_bzipped(self):
"""Test detection of a bzip2 file when the extension is not .bz2."""
with fits.open(self._make_bzip2_file("test0.xx")) as fits_handle:
assert fits_handle._file.compression == "bzip2"
assert len(fits_handle) == 5
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_writeto_bzip2_fileobj(self):
"""Test writing to a bz2.BZ2File file like object"""
fileobj = bz2.BZ2File(self.temp("test.fits.bz2"), "w")
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp("test.fits.bz2")) as hdul:
assert hdul[0].header == h.header
@pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module")
def test_writeto_bzip2_filename(self):
"""Test writing to a bzip2 file by name"""
filename = self.temp("testname.fits.bz2")
h = fits.PrimaryHDU()
h.writeto(filename)
with fits.open(self.temp("testname.fits.bz2")) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zip_file = self._make_zip_file()
with fits.open(zip_file) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
with fits.open(zipfile.ZipFile(zip_file)) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
def test_open_zipped_from_handle(self):
with open(self._make_zip_file(), "rb") as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == "zip"
assert len(fits_handle) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename="test0.fz")
with fits.open(zf) as fits_handle:
assert len(fits_handle) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
pytest.raises(OSError, fits.open, zf, "update")
pytest.raises(OSError, fits.open, zf, "append")
zf = zipfile.ZipFile(zf, "a")
pytest.raises(OSError, fits.open, zf, "update")
pytest.raises(OSError, fits.open, zf, "append")
def test_read_open_astropy_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2774
This tests reading from a ``GzipFile`` object from Astropy's
compatibility copy of the ``gzip`` module.
"""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_multiple_member_zipfile(self):
"""
Opening zip files containing more than one member files should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp("test0.zip"), "w")
zfile.write(self.data("test0.fits"))
zfile.writestr("foo", "bar")
zfile.close()
with pytest.raises(OSError):
fits.open(zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data("test0.fits"), "rb") as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data("test0.fits"), "rb")
f.close()
with fits.open(f) as f2:
assert len(f2) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode="update") as h:
h[0].header["EXPFLAG"] = "ABNORMAL"
h[1].data[0, 0] = 1
with fits.open(gf) as h:
# Just to make sure the update worked; if updates work,
# normal writes should work too...
assert h[0].header["EXPFLAG"] == "ABNORMAL"
assert h[1].data[0, 0] == 1
def test_write_read_gzip_file(self, home_is_temp):
"""
Regression test for https://github.com/astropy/astropy/issues/2794
Ensure files written through gzip are readable.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
hdu.writeto(self.temp("test.fits.gz"))
with open(os.path.expanduser(self.temp("test.fits.gz")), "rb") as f:
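# GZIP_MAGIC holds the leading signature bytes of a gzip stream, so this
# check confirms the file written above really went through gzip compression.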
assert f.read(3) == GZIP_MAGIC
with fits.open(self.temp("test.fits.gz")) as hdul:
assert np.all(hdul[0].data == data)
@pytest.mark.parametrize("ext", ["gz", "bz2", "zip"])
def test_compressed_ext_but_not_compressed(self, ext):
testfile = self.temp(f"test0.fits.{ext}")
shutil.copy(self.data("test0.fits"), testfile)
with fits.open(testfile) as hdul:
assert len(hdul) == 5
fits.append(testfile, np.arange(5))
with fits.open(testfile) as hdul:
assert len(hdul) == 6
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = io.BytesIO()
with open(self.data("test0.fits"), "rb") as f:
filelike.write(f.read())
filelike.seek(0)
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp("test.fits")
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode="update")
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified io.fits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file("test0.fits")
# Opening in text mode should outright fail
for mode in ("r", "w", "a"):
with open(self.temp("test0.fits"), mode) as f:
pytest.raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "rb") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "readonly"
for mode in ("wb", "ab"):
with open(self.temp("test0.fits"), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "wb+") as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file("test0.fits")
with open(self.temp("test0.fits"), "rb+") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "update"
with open(self.temp("test0.fits"), "ab+") as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)["filemode"] == "append"
def test_mmap_unwriteable(self):
"""Regression test for https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise OSError("flush is broken on this platform")
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File.__dict__["_mmap_available"]._cache.clear()
try:
self.copy_file("test0.fits")
with pytest.warns(
AstropyUserWarning, match=r"mmap\.flush is unavailable"
) as w:
with fits.open(
self.temp("test0.fits"), mode="update", memmap=True
) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
# Double check that writing without mmap still worked
with fits.open(self.temp("test0.fits")) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File.__dict__["_mmap_available"]._cache.clear()
def test_mmap_allocate_error(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1380
Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY.
"""
mmap_original = mmap.mmap
# We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which
# emulates an issue where an OSError is raised if the available address
# space is less than the size of the file, even when memory mapping is used.
def mmap_patched(*args, **kwargs):
if kwargs.get("access") == mmap.ACCESS_COPY:
exc = OSError()
exc.errno = errno.ENOMEM
raise exc
else:
return mmap_original(*args, **kwargs)
with fits.open(self.data("test0.fits"), memmap=True) as hdulist:
with patch.object(mmap, "mmap", side_effect=mmap_patched) as p:
with pytest.warns(
AstropyUserWarning,
match=r"Could not memory map array with mode='readonly'",
):
data = hdulist[1].data
p.reset_mock()
assert not data.flags.writeable
def test_mmap_closing(self):
"""
Tests that the mmap reference is closed/removed when there aren't any
HDU data references left.
"""
if not _File._mmap_available:
pytest.xfail("not expected to work on platforms without mmap support")
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
assert hdul._file._mmap is None
hdul[1].data
assert hdul._file._mmap is not None
del hdul[1].data
# Should be no more references to data in the file so close the
# mmap
assert hdul._file._mmap is None
hdul[1].data
hdul[2].data
del hdul[1].data
# hdul[2].data is still referenced, so keep the mmap open
assert hdul._file._mmap is not None
del hdul[2].data
assert hdul._file._mmap is None
assert hdul._file._mmap is None
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
hdul[1].data
# When the only reference to the data is on the hdu object, and the
# hdulist it belongs to has been closed, the mmap should be closed as
# well
assert hdul._file._mmap is None
with fits.open(self.data("test0.fits"), memmap=True) as hdul:
data = hdul[1].data
# also make a copy
data_copy = data.copy()
# The HDUList is closed; in fact, get rid of it completely
del hdul
# The data array should still work though...
assert np.all(data == data_copy)
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from file-like objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike:
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data("test0.fits"), "rb") as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data("test0.fits")) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def test_write_bytesio_discontiguous(self):
"""
Regression test related to
https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
Demonstrates that writing an HDU containing a discontiguous Numpy array
should work properly.
"""
data = np.arange(100)[::3]
hdu = fits.PrimaryHDU(data=data)
fileobj = io.BytesIO()
hdu.writeto(fileobj)
fileobj.seek(0)
with fits.open(fileobj) as h:
assert np.all(h[0].data == data)
def test_write_bytesio(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2463
Test against `io.BytesIO`. `io.StringIO` is not supported.
"""
self._test_write_string_bytes_io(io.BytesIO())
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="Cannot test on Windows"
)
def test_filename_with_colon(self):
"""
Test reading and writing a file with a colon in the filename.
Regression test for https://github.com/astropy/astropy/issues/3122
"""
# Skip on Windows since colons in filenames make NTFS sad.
filename = "APEXHET.2014-04-01T15:18:01.000.fits"
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert np.all(hdul[0].data == hdu.data)
def test_writeto_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to write an hdulist
to a full disk.
"""
def _writeto(self, array):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
msg = (
"Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file."
)
with pytest.raises(OSError, match=msg) as exc:
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
n = np.arange(0, 1000, dtype="int64")
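# 1000 int64 values occupy 8000 bytes, matching the "requested 8000" in the
# expected error message above.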
hdu = fits.PrimaryHDU(n)
hdulist = fits.HDUList(hdu)
filename = self.temp("test.fits")
with open(filename, mode="wb") as fileobj:
hdulist.writeto(fileobj)
def test_flush_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to update an hdulist
to a full disk.
"""
filename = self.temp("test.fits")
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul[0].data = np.arange(0, 1000, dtype="int64")
hdul.writeto(filename)
def _writedata(self, fileobj):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
msg = (
"Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file."
)
with pytest.raises(OSError, match=msg) as exc:
with fits.open(filename, mode="update") as hdul:
hdul[0].data = np.arange(0, 1000, dtype="int64")
hdul.insert(1, fits.ImageHDU())
hdul.flush()
def _test_write_string_bytes_io(self, fileobj):
"""
Implemented for both test_write_stringio and test_write_bytesio.
"""
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(fileobj)
hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
assert FITSDiff(hdul, hdul2).identical
def _make_gzip_file(self, filename="test0.fits.gz"):
gzfile = self.temp(filename)
with open(self.data("test0.fits"), "rb") as f:
gz = gzip.open(gzfile, "wb")
gz.write(f.read())
gz.close()
return gzfile
def test_write_overwrite(self, home_is_temp):
filename = self.temp("test_overwrite.fits")
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdu.writeto(filename)
hdu.writeto(filename, overwrite=True)
def _make_zip_file(self, mode="copyonwrite", filename="test0.fits.zip"):
zfile = zipfile.ZipFile(self.temp(filename), "w")
zfile.write(self.data("test0.fits"))
zfile.close()
return zfile.filename
def _make_bzip2_file(self, filename="test0.fits.bz2"):
bzfile = self.temp(filename)
with open(self.data("test0.fits"), "rb") as f:
bz = bz2.BZ2File(bzfile, "w")
bz.write(f.read())
bz.close()
return bzfile
def test_simulateonly(self):
"""Write to None simulates writing."""
with fits.open(self.data("test0.fits")) as hdul:
hdul.writeto(None)
hdul[0].writeto(None)
hdul[0].header.tofile(None)
def test_bintablehdu_zero_bytes(self):
"""Make sure we don't have any zero-byte writes in BinTableHDU"""
bright = np.rec.array(
[
(1, "Sirius", -1.45, "A1V"),
(2, "Canopus", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu_non_zero = fits.BinTableHDU(bright)
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp("bright.fits"), mode="wb"))
hdu_non_zero.writeto(fh)
fh.close()
def test_primaryhdu_zero_bytes(self):
"""
Make sure we don't have any zero-byte writes from an ImageHDU
(or other HDU) whose ``size % BLOCK_SIZE == 0``
"""
hdu_img_2880 = fits.PrimaryHDU(data=np.arange(720, dtype="i4"))
# use safeio, a special file handler meant to fail on zero-byte writes
fh = safeio.CatchZeroByteWriter(open(self.temp("image.fits"), mode="wb"))
hdu_img_2880.writeto(fh)
fh.close()
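# ``safeio.CatchZeroByteWriter`` is used above as a file wrapper that is meant
# to fail whenever ``write`` is called with zero bytes.  The sketch below only
# illustrates the idea; it is not the actual helper, and the class name here
# is made up.
class _ZeroByteWriteCatcherSketch:
    def __init__(self, fileobj):
        self._fileobj = fileobj
    def write(self, data):
        if len(data) == 0:
            raise ValueError("zero-byte write detected")
        return self._fileobj.write(data)
    def __getattr__(self, attr):
        # Delegate everything else (close, flush, seek, ...) to the wrapped file.
        return getattr(self._fileobj, attr)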
class TestStreamingFunctions(FitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self, home_is_temp):
shdu = self._make_streaming_hdu(self.temp("new.fits"))
assert isinstance(shdu.size, int)
assert shdu.size == 100
arr = np.arange(25, dtype=np.int32).reshape((5, 5))
shdu.write(arr)
assert shdu.writecomplete
shdu.close()
with fits.open(self.temp("new.fits")) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected.
"""
with pytest.raises(ValueError):
with open(self.temp("new.fits"), "wb") as f:
header = fits.Header()
fits.StreamingHDU(f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
with fits.open(self.temp("new.fits")) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = io.BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp("new.fits"), "ab+") as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self, capsys):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header["EXTNAME"] = 12345678
hdul = fits.HDUList([phdu, ihdu])
filename = self.temp("temp.fits")
pytest.raises(
fits.VerifyError, hdul.writeto, filename, output_verify="exception"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
hdul.writeto(filename, output_verify="fix")
with fits.open(filename):
assert hdul[1].name == "12345678"
assert hdul[1].header["EXTNAME"] == "12345678"
hdul.close()
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd["SIMPLE"] = (True, "conforms to FITS standard")
hd["BITPIX"] = (32, "array data type")
hd["NAXIS"] = (2, "number of array dimensions")
hd["NAXIS1"] = 5
hd["NAXIS2"] = 5
hd["EXTEND"] = True
return fits.StreamingHDU(fileobj, hd)
def test_blank_ignore(self):
with fits.open(self.data("blank.fits"), ignore_blank=True) as f:
assert f[0].data.flat[0] == 2
def test_error_if_memmap_impossible(self):
pth = self.data("blank.fits")
with fits.open(pth, memmap=True) as hdul:
with pytest.raises(ValueError):
hdul[0].data
# However, it should not fail if do_not_scale_image_data was used:
# See https://github.com/astropy/astropy/issues/3766
with fits.open(pth, memmap=True, do_not_scale_image_data=True) as hdul:
hdul[0].data # Just make sure it doesn't crash
|
bc7ce0f39c5fe0639a9b7ccc3c1d856c83eef121f03c1f09d20975449a4d052f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some methods related to ``CDS`` format
reader/writer.
Requires `pyyaml <https://pyyaml.org/>`_ to be installed.
"""
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.table import Column, MaskedColumn, Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal
test_dat = [
"names e d s i",
"HD81809 1E-7 22.25608 +2 67",
"HD103095 -31.6e5 +27.2500 -9E34 -30",
]
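# Many tests below locate the Byte-By-Byte block in the written output by
# searching for the 80-character separator lines.  The small helper below is
# only a sketch of that recurring pattern; the tests themselves inline it.
def _select_byte_by_byte_sketch(lines):
    """Return ``lines`` from the first '=' separator onwards."""
    i_bbb = lines.index("=" * 80)
    return lines[i_bbb:]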
def test_roundtrip_mrt_table():
"""
Tests whether the CDS writer can roundtrip a table, i.e. read a table
into a ``Table`` object and write it back out unchanged. Since the CDS
writer presently uses an MRT format template, only the Byte-By-Byte and
data sections of the table can be compared between the original and the
newly written table.
Further, the CDS reader cannot recognize column formats from the header
of a CDS/MRT table, so this test only works for a limited set of simple
tables that have no whitespace in the column values and no mix-in
columns. Because of this, the written output cannot be matched directly
against the original file and has to be checked against a list of lines.
Masked columns are read properly, though, and are therefore exercised
during round-tripping.
The differences between the ``cdsFunctional2.dat`` file and ``exp_output``
are the following:
* Metadata differs because the MRT template is used for writing.
* The spacing between the ``Label`` and ``Explanations`` columns in the
  Byte-By-Byte section differs.
* Units are written as ``[cm.s-2]`` rather than ``[cm/s2]``, since both
  are valid according to the CDS/MRT standard.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- ID Star ID ",
" 9-12 I4 K Teff [4337/4654] Effective temperature ",
"14-17 F4.2 [cm.s-2] logg [0.77/1.28] Surface gravity ",
"19-22 F4.2 km.s-1 vturb [1.23/1.82] Micro-turbulence velocity",
"24-28 F5.2 [-] [Fe/H] [-2.11/-1.5] Metallicity ",
"30-33 F4.2 [-] e_[Fe/H] ? rms uncertainty on [Fe/H] ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"S05-5 4337 0.77 1.80 -2.07 ",
"S08-229 4625 1.23 1.23 -1.50 ",
"S05-10 4342 0.91 1.82 -2.11 0.14",
"S05-47 4654 1.28 1.74 -1.64 0.16",
]
dat = get_pkg_data_filename(
"data/cdsFunctional2.dat", package="astropy.io.ascii.tests"
)
t = Table.read(dat, format="ascii.mrt")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
def test_write_byte_by_byte_units():
t = ascii.read(test_dat)
col_units = [None, u.C, u.kg, u.m / u.s, u.year]
t._set_column_attribute("unit", col_units)
# Add a column with magnitude units.
# Note that magnitude has to be assigned for each value explicitly.
t["magnitude"] = [u.Magnitude(25), u.Magnitude(-9)]
col_units.append(u.mag)
out = StringIO()
t.write(out, format="ascii.mrt")
# Read written table.
tRead = ascii.read(out.getvalue(), format="cds")
assert [tRead[col].unit for col in tRead.columns] == col_units
def test_write_readme_with_default_options():
exp_output = [
"Title:",
"Authors:",
"Table:",
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67",
"HD103095 -3e+06 27.25000 -9e+34 -30",
]
t = ascii.read(test_dat)
out = StringIO()
t.write(out, format="ascii.mrt")
assert out.getvalue().splitlines() == exp_output
def test_write_empty_table():
out = StringIO()
with pytest.raises(NotImplementedError):
Table().write(out, format="ascii.mrt")
def test_write_null_data_values():
exp_output = [
"HD81809 1e-07 22.25608 2.0e+00 67",
"HD103095 -3e+06 27.25000 -9.0e+34 -30",
"Sun 5.3e+27 ",
]
t = ascii.read(test_dat)
t.add_row(
["Sun", "3.25", "0", "5.3e27", "2"], mask=[False, True, True, False, True]
)
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
lines = lines[i_secs[-1] + 1 :] # Last section is the data.
assert lines == exp_output
def test_write_byte_by_byte_for_masked_column():
"""
This test differs from the ``test_write_null_data_values``
above in that it tests the column value limits in the Byte-By-Byte
description section for columns whose values are masked.
It also checks the description for columns with identical values.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [0.0/0.01]? Description of e ",
"16-17 F2.0 --- d ? Description of d ",
"19-25 E7.1 --- s [-9e+34/2.0] Description of s ",
"27-29 I3 --- i [-30/67] Description of i ",
"31-33 F3.1 --- sameF [5.0/5.0] Description of sameF",
"35-36 I2 --- sameI [20] Description of sameI ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 2e+00 67 5.0 20",
"HD103095 -9e+34 -30 5.0 20",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["e"] = MaskedColumn(t["e"], mask=[False, True])
t["d"] = MaskedColumn(t["d"], mask=[True, True])
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
assert lines == exp_output
exp_coord_cols_output = {
# fmt: off
"generic": [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 22 02 15.4500000000 -61 39 34.599996000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
"positive_de": [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 8 A8 --- names Description of names ',
'10-14 E5.1 --- e [-3160000.0/0.01] Description of e',
'16-23 F8.5 --- d [22.25/27.25] Description of d ',
'25-31 E7.1 --- s [-9e+34/2.0] Description of s ',
'33-35 I3 --- i [-30/67] Description of i ',
'37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ',
'41-42 I2 --- sameI [20] Description of sameI ',
'44-45 I2 h RAh Right Ascension (hour) ',
'47-48 I2 min RAm Right Ascension (minute) ',
'50-62 F13.10 s RAs Right Ascension (second) ',
' 64 A1 --- DE- Sign of Declination ',
'65-66 I2 deg DEd Declination (degree) ',
'68-69 I2 arcmin DEm Declination (arcmin) ',
'71-82 F12.9 arcsec DEs Declination (arcsec) ',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 1e-07 22.25608 2e+00 67 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000',
],
# fmt: on
"galactic": [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg GLON Galactic Longitude ",
"61-76 F16.12 deg GLAT Galactic Latitude ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 330.071639591690 -45.548080484609",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 330.071639591690 -45.548080484609",
],
"ecliptic": [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-14 E5.1 --- e [-3160000.0/0.01] Description of e ",
"16-23 F8.5 --- d [22.25/27.25] Description of d ",
"25-31 E7.1 --- s [-9e+34/2.0] Description of s ",
"33-35 I3 --- i [-30/67] Description of i ",
"37-39 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"41-42 I2 --- sameI [20] Description of sameI ",
"44-59 F16.12 deg ELON Ecliptic Longitude (geocentrictrueecliptic)",
"61-76 F16.12 deg ELAT Ecliptic Latitude (geocentrictrueecliptic) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD81809 1e-07 22.25608 2e+00 67 5.0 20 306.224208650096 -45.621789850825",
"HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 306.224208650096 -45.621789850825",
],
}
def test_write_coord_cols():
"""
Tests writing tables with a ``SkyCoord`` coordinate column. Only one such
column can be expanded into component columns per table, because the
expansion requires iterating over the table columns, which would have to
be done again if additional coordinate columns were present.
"""
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
# Coordinates of ASASSN-15lh
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
# Coordinates of ASASSN-14li
coordp = SkyCoord(192.06343503, 17.77402684, unit=u.deg)
cols = [
Column([coord, coordp]), # Generic coordinate column
coordp, # Coordinate column with positive DEC
coord.galactic, # Galactic coordinates
coord.geocentrictrueecliptic, # Ecliptic coordinates
]
# Loop through different types of coordinate columns.
for col, coord_type in zip(cols, exp_coord_cols_output):
exp_output = exp_coord_cols_output[coord_type]
t["coord"] = col
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
# Check if the original table columns remains unmodified.
assert t.colnames == ["names", "e", "d", "s", "i", "sameF", "sameI", "coord"]
def test_write_byte_by_byte_bytes_col_format():
"""
Tests the alignment of Byte counts with respect to the hyphen
in the Bytes column of the Byte-By-Byte section. The whitespace around
the hyphen is governed by the number of digits in the total Byte
count. Single-Byte columns should have a single Byte count
without the hyphen.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 8 A8 --- names Description of names ",
"10-21 E12.6 --- e [-3160000.0/0.01] Description of e",
"23-30 F8.5 --- d [22.25/27.25] Description of d ",
"32-38 E7.1 --- s [-9e+34/2.0] Description of s ",
"40-42 I3 --- i [-30/67] Description of i ",
"44-46 F3.1 --- sameF [5.0/5.0] Description of sameF ",
"48-49 I2 --- sameI [20] Description of sameI ",
" 51 I1 --- singleByteCol [2] Description of singleByteCol ",
"53-54 I2 h RAh Right Ascension (hour) ",
"56-57 I2 min RAm Right Ascension (minute) ",
"59-71 F13.10 s RAs Right Ascension (second) ",
" 73 A1 --- DE- Sign of Declination ",
"74-75 I2 deg DEd Declination (degree) ",
"77-78 I2 arcmin DEm Declination (arcmin) ",
"80-91 F12.9 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
]
t = ascii.read(test_dat)
t.add_column([5.0, 5.0], name="sameF")
t.add_column([20, 20], name="sameI")
t["coord"] = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["singleByteCol"] = [2, 2]
t["e"].format = ".5E"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output
def test_write_byte_by_byte_wrapping():
"""
Test line wrapping in the description column of the
Byte-By-Byte section of the ReadMe.
"""
exp_output = """\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
1- 8 A8 --- thisIsALongColumnLabel This is a tediously long
description. But they do sometimes
have them. Better to put extra
details in the notes. This is a
tediously long description. But they
do sometimes have them. Better to put
extra details in the notes.
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
--------------------------------------------------------------------------------
"""
t = ascii.read(test_dat)
t.remove_columns(["s", "i"])
description = (
"This is a tediously long description."
+ " But they do sometimes have them."
+ " Better to put extra details in the notes. "
)
t["names"].description = description * 2
t["names"].name = "thisIsALongColumnLabel"
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
# Select only the Byte-By-Byte section.
lines = lines[i_secs[0] : i_secs[-2]]
lines.append("-" * 80) # Append a separator line.
assert lines == exp_output.splitlines()
def test_write_mixin_and_broken_cols():
"""
Tests conversion to string values for ``mix-in`` columns other than
``SkyCoord`` and for columns with only partial ``SkyCoord`` values.
"""
# fmt: off
exp_output = [
'================================================================================',
'Byte-by-byte Description of file: table.dat',
'--------------------------------------------------------------------------------',
' Bytes Format Units Label Explanations',
'--------------------------------------------------------------------------------',
' 1- 7 A7 --- name Description of name ',
' 9- 74 A66 --- Unknown Description of Unknown',
' 76-114 A39 --- Unknown Description of Unknown',
'116-138 A23 --- Unknown Description of Unknown',
'--------------------------------------------------------------------------------',
'Notes:',
'--------------------------------------------------------------------------------',
'HD81809 <SkyCoord (ICRS): (ra, dec) in deg',
' (330.564375, -61.65961111)> (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000',
'random 12 (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000',
]
# fmt: on
t = Table()
t["name"] = ["HD81809"]
coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
t["coord"] = Column(coord)
t.add_row(["random", 12])
t["cart"] = coord.cartesian
t["time"] = Time("2019-1-1")
out = StringIO()
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and later lines.
# Check the written table.
assert lines == exp_output
def test_write_extra_skycoord_cols():
"""
Tests output for cases where the table contains multiple ``SkyCoord`` columns.
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-27 F13.10 s RAs Right Ascension (second)",
" 29 A1 --- DE- Sign of Declination ",
"30-31 I2 deg DEd Declination (degree) ",
"33-34 I2 arcmin DEm Declination (arcmin) ",
"36-47 F12.9 arcsec DEs Declination (arcsec) ",
"49-62 A14 --- coord2 Description of coord2 ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.9000000000 +06 24 07.999200000 12.4163 6.407 ",
"HD81809 22 02 15.4500000000 -61 39 34.599996000 330.564 -61.66",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord1"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
t["coord2"] = SkyCoord([12.41630, 330.564400], [6.407, -61.66], unit=u.deg)
out = StringIO()
with pytest.warns(
UserWarning,
match=r"column 2 is being skipped with designation of a "
r"string valued column `coord2`",
):
t.write(out, format="ascii.mrt")
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines[:-2] == exp_output[:-2]
for a, b in zip(lines[-2:], exp_output[-2:]):
assert a[:18] == b[:18]
assert a[30:42] == b[30:42]
assert_almost_equal(
np.fromstring(a[2:], sep=" "), np.fromstring(b[2:], sep=" ")
)
def test_write_skycoord_with_format():
"""
Tests output when a custom format is given for the ``SkyCoord`` seconds
columns (``RAs`` and ``DEs``).
"""
exp_output = [
"================================================================================",
"Byte-by-byte Description of file: table.dat",
"--------------------------------------------------------------------------------",
" Bytes Format Units Label Explanations",
"--------------------------------------------------------------------------------",
" 1- 7 A7 --- name Description of name ",
" 9-10 I2 h RAh Right Ascension (hour) ",
"12-13 I2 min RAm Right Ascension (minute)",
"15-19 F5.2 s RAs Right Ascension (second)",
" 21 A1 --- DE- Sign of Declination ",
"22-23 I2 deg DEd Declination (degree) ",
"25-26 I2 arcmin DEm Declination (arcmin) ",
"28-31 F4.1 arcsec DEs Declination (arcsec) ",
"--------------------------------------------------------------------------------",
"Notes:",
"--------------------------------------------------------------------------------",
"HD4760 0 49 39.90 +06 24 08.0",
"HD81809 22 02 15.45 -61 39 34.6",
]
t = Table()
t["name"] = ["HD4760", "HD81809"]
t["coord"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
out = StringIO()
# This will raise a warning because ``formats`` is checked against the column
# names before the writer creates the final list of component columns.
with pytest.warns(
AstropyWarning,
match=r"The key.s. {'[RD][AE]s', '[RD][AE]s'} specified in "
r"the formats argument do not match a column name.",
):
t.write(out, format="ascii.mrt", formats={"RAs": "05.2f", "DEs": "04.1f"})
lines = out.getvalue().splitlines()
i_bbb = lines.index("=" * 80)
lines = lines[i_bbb:] # Select Byte-By-Byte section and following lines.
# Check the written table.
assert lines == exp_output
|
f3de6c93668aed3d6057bc272233519c2af1a7751ac42d2c9d3f322c64fe57b5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import io
import os
import re
from contextlib import nullcontext
from io import BytesIO
from textwrap import dedent
import numpy as np
import pytest
from numpy import ma
from astropy.io import ascii
from astropy.io.ascii.core import (
FastOptionsError,
InconsistentTableError,
ParameterError,
)
from astropy.io.ascii.fastbasic import (
FastBasic,
FastCommentedHeader,
FastCsv,
FastNoHeader,
FastRdb,
FastTab,
)
from astropy.table import MaskedColumn, Table
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_almost_equal, assert_equal, assert_true
StringIO = lambda x: BytesIO(x.encode("ascii"))
CI = os.environ.get("CI", False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.0e-15, atol=1.0e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(
not isinstance(t2[name][i], str) and np.isnan(t2[name][i])
)
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(
tmp_path,
table,
Reader=None,
format=None,
parallel=False,
check_meta=False,
**kwargs,
):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += "\n"
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(
table, format=format, guess=False, fast_reader={"parallel": True}, **kwargs
)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = tmp_path / f"table{_filename_counter}.txt"
_filename_counter += 1
with open(filename, "wb") as f:
f.write(table.encode("ascii"))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(
filename,
format=format,
guess=False,
fast_reader={"parallel": True},
**kwargs,
)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope="function")
def read_basic(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastBasic, format="basic")
@pytest.fixture(scope="function")
def read_csv(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastCsv, format="csv")
@pytest.fixture(scope="function")
def read_tab(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastTab, format="tab")
@pytest.fixture(scope="function")
def read_commented_header(tmp_path, request):
return functools.partial(
_read, tmp_path, Reader=FastCommentedHeader, format="commented_header"
)
@pytest.fixture(scope="function")
def read_rdb(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastRdb, format="rdb")
@pytest.fixture(scope="function")
def read_no_header(tmp_path, request):
return functools.partial(_read, tmp_path, Reader=FastNoHeader, format="no_header")
@pytest.mark.parametrize("delimiter", [",", "\t", " ", "csv"])
@pytest.mark.parametrize("quotechar", ['"', "'"])
@pytest.mark.parametrize("fast", [False, True])
def test_embedded_newlines(delimiter, quotechar, fast):
"""Test that embedded newlines are supported for io.ascii readers
and writers, both fast and Python readers."""
# Start with an assortment of values with different embedded newlines and whitespace
dat = [
["\t a ", " b \n cd ", "\n"],
[" 1\n ", '2 \n" \t 3\n4\n5', "1\n '2\n"],
[" x,y \nz\t", "\t 12\n\t34\t ", "56\t\n"],
]
dat = Table(dat, names=("a", "b", "c"))
# Construct a table which is our expected result of writing the table and
# reading it back. Certain stripping of whitespace is expected.
exp = {} # expected output from reading
for col in dat.itercols():
vals = []
for val in col:
# Readers and writers both strip whitespace from ends of values
val = val.strip(" \t")
if not fast:
# Pure Python reader has a "feature" where it strips trailing
# whitespace from each input line. This means a value like
# " x \ny \t\n" gets read as "x\ny".
bits = val.splitlines(keepends=True)
bits_out = []
for bit in bits:
bit = re.sub(r"[ \t]+(\n?)$", r"\1", bit.strip(" \t"))
bits_out.append(bit)
val = "".join(bits_out)
vals.append(val)
exp[col.info.name] = vals
exp = Table(exp)
if delimiter == "csv":
format = "csv"
delimiter = ","
else:
format = "basic"
# Write the table to `text`
fh = io.StringIO()
ascii.write(
dat,
fh,
format=format,
delimiter=delimiter,
quotechar=quotechar,
fast_writer=fast,
)
text = fh.getvalue()
# Read it back and compare to the expected
dat_out = ascii.read(
text,
format=format,
guess=False,
delimiter=delimiter,
quotechar=quotechar,
fast_reader=fast,
)
eq = dat_out.values_equal(exp)
assert all(np.all(col) for col in eq.itercols())
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format="fast_basic", guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format="fast_basic", guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format="fast_basic", guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("X", "Y", "Z"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic(
"A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel
)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table(
[["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]],
names=("col1", "col2", "col3"),
)
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header(
"A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel
)
expected = Table(
[["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]], names=("X", "Y", "Z")
)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic(
"# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel
)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
\n"""
table = read_basic(text, delimiter=",", parallel=parallel)
expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = " 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n"
table = read_basic(text, delimiter=",", parallel=parallel)
expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table["A"].dtype.kind, "f")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "i")
assert_equal(table["D"].dtype.kind, "f")
assert table["E"].dtype.kind in ("S", "U")
assert table["F"].dtype.kind in ("S", "U")
assert table["G"].dtype.kind in ("S", "U")
assert table["H"].dtype.kind in ("S", "U")
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent(
"""
COL1 COL2 COL3
1 A -1
2 B -2
"""
)
expected = Table([[1, 2], ["A", "B"], [-1, -2]], names=("COL1", "COL2", "COL3"))
for sep in " ,\t#;":
table = read_basic(text.replace(" ", sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic(
"A B C D\n1 2 3 4\n5 6 7 8", include_names=["A", "D"], parallel=parallel
)
expected = Table([[1, 5], [4, 8]], names=("A", "D"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic(
"A B C D\n1 2 3 4\n5 6 7 8", exclude_names=["A", "D"], parallel=parallel
)
expected = Table([[2, 6], [3, 7]], names=("B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent(
"""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
"""
)
table = read_basic(
text,
include_names=["A", "B", "D", "F", "H"],
exclude_names=["B", "F"],
parallel=parallel,
)
expected = Table([[1, 9], [4, 12], [8, 16]], names=("A", "D", "H"))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281): parsing doubled quotes such as "ab""cd" in a
quoted field was previously handled incorrectly.
"""
tbl = "\n".join( # noqa: FLY002
[
"a,b",
'"d""","d""q"',
'"""q",""""',
]
)
# fmt: off
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
# fmt: on
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
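# For reference, Python's built-in csv module applies the same doubled-quote
# rule ('""' inside a quoted field means a literal '"'), so it can be used to
# sanity-check the expected values above.  The helper below is illustrative
# only and is not used by the tests.
def _stdlib_csv_rows(text):
    """Parse ``text`` with the standard-library csv reader."""
    import csv
    return list(csv.reader(text.splitlines()))
# For example, _stdlib_csv_rows('"d""","d""q"') == [['d"', 'd"q']].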
@pytest.mark.filterwarnings(
"ignore:OverflowError converting to IntType in column TIMESTAMP"
)
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent(
"""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.unistra.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.unistra.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.unistra.fr/2MASS/H","https://alaskybis.unistra.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.unistra.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
"""
)
ascii.read(tbl, format="csv", fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent(
"""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
"""
)
table = read_basic(text, parallel=parallel)
expected = Table(
[["1.5", "a"], ["2.1", "b"], ["-37.1", "c\nd"]], names=("A B", "C", "D")
)
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize(
"key,val",
[
("delimiter", ",,"), # multi-char delimiter
("comment", "##"), # multi-char comment
("data_start", None), # data_start=None
("data_start", -1), # data_start negative
("quotechar", "##"), # multi-char quote signifier
("header_start", -1), # negative header_start
(
"converters",
{i + 1: ascii.convert_numpy(np.uint) for i in range(3)},
), # passing converters
("Inputter", ascii.ContinuationLinesInputter), # passing Inputter
("header_Splitter", ascii.DefaultSplitter), # passing Splitter
("data_Splitter", ascii.DefaultSplitter),
],
)
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read("1 2 3\n4 5 6")
with pytest.raises(ParameterError):
ascii.read("1 2 3\n4 5 6", format="fast_basic", guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read("1 2 3\n4 5 6") # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read("1 2 3\n4 5 6", format="basic", fast_reader={"foo": 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read("1 2 3\n4 5 6")
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent(
"""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
"""
)
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert (
"Number of header columns (3) inconsistent with data columns in data line 2"
in str(e.value)
)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert (
"Number of header columns (2) inconsistent with data columns in data line 0"
in str(e.value)
)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert (
"Number of header columns (2) inconsistent with data columns in data line 0"
in str(e.value)
)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(
get_pkg_data_filename("data/conf_py.txt"), fast_reader=True, guess=True
)
assert "Unable to guess table format with the guesses listed below" in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table["B"][1] is not ma.masked
assert table["C"][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=",").read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ["a", "b"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ["a"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=("A", "B", "C"))
assert_table_equal(table, expected)
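# Illustrative sketch (not part of the test suite): trimming rows with
# ``data_start``/``data_end`` through the public ``ascii.read`` interface.
# The function name and sample data are made up for demonstration only.
def _example_data_start_end():
    from astropy.io import ascii

    lines = ["A B C", "1 2 3", "4 5 6", "7 8 9", "10 11 12"]
    # Keep only the first two data rows; data_end also accepts negative
    # indices counted from the end, so data_end=-2 should give the same table.
    t = ascii.read(lines, format="basic", data_end=3)
    return t  # expected to contain the rows (1 2 3) and (4 5 6)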
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent(
"""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
"""
)
expected = Table(
{
"A": [
np.nan,
np.nan,
np.nan,
np.inf,
np.inf,
np.inf,
np.inf,
-np.inf,
-np.inf,
]
}
)
table = read_basic(text, parallel=parallel)
assert table["A"].dtype.kind == "f"
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=",", parallel=parallel)
# The empty value in row A should become a masked '0'
assert isinstance(table["A"], MaskedColumn)
assert table["A"][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table["A"].data.data[0], "0")
assert table["A"][1] is not ma.masked
table = read_basic(
text, delimiter=",", fill_values=("-999", "0"), parallel=parallel
)
assert isinstance(table["B"], MaskedColumn)
assert table["A"][0] is not ma.masked # empty value unaffected
assert table["C"][2] is not ma.masked # -9999 is not an exact match
assert table["B"][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table["B"].data.data[1], 0.0)
assert table["B"][0] is not ma.masked
table = read_basic(text, delimiter=",", fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in "ABC":
assert not isinstance(table[name], MaskedColumn)
table = read_basic(
text,
delimiter=",",
fill_values=[("", "0", "A"), ("nan", "999", "A", "C")],
parallel=parallel,
)
assert np.isnan(table["B"][3]) # nan filling skips column B
# should skip masking as well as replacing nan
assert table["B"][3] is not ma.masked
assert table["A"][0] is ma.masked
assert table["A"][2] is ma.masked
assert_equal(table["A"].data.data[0], "0")
assert_equal(table["A"].data.data[2], "999")
assert table["C"][0] is ma.masked
assert_almost_equal(table["C"].data.data[0], 999.0)
assert_almost_equal(table["C"][1], -3.4) # column is still of type float
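# Illustrative sketch (not part of the test suite): the default ``fill_values``
# behaviour through the public ``ascii.read`` interface -- empty fields are
# masked and filled with '0' unless told otherwise. Names below are made up.
def _example_fill_values_default():
    from astropy.io import ascii

    lines = ["A,B", ",2", "3,4"]
    t = ascii.read(lines, format="csv")
    # t["A"] should be a MaskedColumn with the first entry masked;
    # passing fill_values=[] instead disables the masking entirely.
    return t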
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=["A", "B"], parallel=parallel)
assert table["A"][0] is ma.masked
assert table["B"][1] is ma.masked
assert table["C"][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=["A", "B"], parallel=parallel)
assert table["C"][2] is ma.masked
assert table["A"][0] is not ma.masked
assert table["B"][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(
text, fill_include_names=["A", "B"], fill_exclude_names=["B"], parallel=parallel
)
assert table["A"][0] is ma.masked
# fill_exclude_names applies after fill_include_names
assert table["B"][1] is not ma.masked
assert table["C"][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = "A B C\n"
for i in range(500): # create 500 rows
text += " ".join([str(i) for i in range(3)])
text += "\n"
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = " ".join([str(i) for i in range(500)])
text += "\n" + text + "\n" + text
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = "a b c\n1 2 3\n4 5 6"
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format="fast_basic", guess=False, comment="##")
# Enable multiprocessing and the fast converter
try:
ascii.read(
text,
format="basic",
guess=False,
fast_reader={"parallel": True, "use_fast_converter": True},
)
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == "nt":
ascii.read(
text,
format="basic",
guess=False,
fast_reader={"parallel": False, "use_fast_converter": True},
)
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format="fast_basic", guess=False, fast_reader={"foo": True})
# Use the slow reader instead
ascii.read(text, format="basic", guess=False, comment="##", fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format="basic", guess=False, comment="##")
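# Illustrative sketch (not part of the test suite): the main ways of selecting
# the reader engine via the ``fast_reader`` keyword. Sample data is made up.
def _example_fast_reader_keyword():
    from astropy.io import ascii

    text = "a b c\n1 2 3\n4 5 6"
    # Default: try the fast C reader first, fall back to the Python reader.
    t1 = ascii.read(text, format="basic")
    # Force the pure Python reader.
    t2 = ascii.read(text, format="basic", fast_reader=False)
    # Explicitly enable the fast converter for float parsing.
    t3 = ascii.read(text, format="basic", fast_reader={"use_fast_converter": True})
    return t1, t2, t3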
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table["1"][0], " a") # preserve line whitespace
assert_equal(table["2"][0], " b ") # preserve field whitespace
assert table["3"][0] is ma.masked # empty value should be masked
assert_equal(table["2"][1], " d\n e") # preserve whitespace in quoted fields
assert_equal(table["3"][1], " ") # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
begin right after the header.
"""
text = "ignore this line\na b c\n1 2 3\n4 5 6"
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
assert_table_equal(t1, expected)
text = "# first commented line\n # second commented line\n\n" + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
# negative indexing allowed
t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel)
assert_table_equal(t3, expected)
text += "7 8 9"
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=("A", "B", "C"))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
# data_start cannot be negative
read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel)
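# Illustrative sketch (not part of the test suite): the commented-header
# format through the public interface. Sample data is made up.
def _example_commented_header():
    from astropy.io import ascii

    text = "# x y\n1 2\n3 4"
    t = ascii.read(text, format="commented_header")
    # Column names should be taken from the commented line: ["x", "y"].
    return t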
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [" 9"], [4.3]], names=("A", "B", "C"))
assert_table_equal(table, expected)
assert_equal(table["A"].dtype.kind, "i")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "f")
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tS\tN\n4\tb\ta" # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert "Column C failed to convert" in str(e.value)
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tN\n1\t2\t3" # not enough types specified
read_rdb(text, parallel=parallel)
assert "mismatch between number of column names and column types" in str(e.value)
with pytest.raises(ValueError) as e:
text = "A\tB\tC\nN\tN\t5\n1\t2\t3" # invalid type for column C
read_rdb(text, parallel=parallel)
assert "type definitions do not all match [num](N|S)" in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table(
[[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=("A", "B", "C")
)
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=("A", "B", "C"))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert "header columns (3) inconsistent with data columns in data line 0" in str(
e.value
)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=("A", "B", "C"))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=("A", "B", "C"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table["c"][0] == "\n" # empty value masked by default
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = "a,b,c\n#1,2,3\n4,5,6"
table = read_csv(text, parallel=parallel)
expected = Table([["#1", "4"], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = "a\tb\tc\n # comment line\n1\t2\t3"
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = "a b c\n1 2 \n3 4 5"
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format="fast_basic", guess=False)
assert "header columns (3) inconsistent with data columns in data line 0" in str(
e.value
)
text = "a b c\n 1 2 3 \t \n 4 5 6 "
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic("a b c", parallel=parallel)
expected = Table([[], [], []], names=("a", "b", "c"))
assert_table_equal(table, expected)
table = read_basic("a b c\n1 2 3", data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = "a b c\n1 2 3\n4 5 6\n7 8 9\n"
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=("a", "b", "c"))
for newline in ("\r\n", "\r"):
table = read_basic(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = "#" + text
for newline in ("\r\n", "\r"):
table = read_commented_header(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table(
[MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=("a", "b", "c"),
)
expected["a"][0] = np.ma.masked
expected["c"][0] = np.ma.masked
text = "a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n"
for newline in ("\r\n", "\r"):
table = read_rdb(text.replace("\n", newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta["comments"], ["header comment", "comment 2", "comment 3"])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=("a", "b"))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t"""
head = [f"A{i}" for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.hugemem
def test_read_big_table(tmp_path):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3Gb) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
``--run-hugemem`` cli option is given. Note that running the test requires
quite a lot of memory (~18Gb when reading the file) !!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = tmp_path / "big_table.csv"
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format="ascii.csv", overwrite=True)
t = None
print(
"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
)
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format="ascii.csv", fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.hugemem
def test_read_big_table2(tmp_path):
"""Test reading of a file with a huge column."""
# (2**32 // 2) : max value for a signed 32-bit int
# // 10 : we use a value for rows that have 10 chars (1e9)
# + 5 : add a few lines so the column length cannot be stored in a 32-bit int
NB_ROWS = 2**32 // 2 // 10 + 5
filename = tmp_path / "big_table.csv"
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
t = Table(data=[data], names=["a"], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format="ascii.csv", overwrite=True)
t = None
print(
"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
)
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format="ascii.csv", fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize(
"fast_reader",
[False, {"use_fast_converter": False}, {"use_fast_converter": True}],
)
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.0e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader["parallel"] = parallel
if fast_reader.get("use_fast_converter"):
rtol = 1.0e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
if not parallel and not fast_reader:
ctx = nullcontext()
else:
ctx = pytest.warns()
fields = ["10.1E+199", "3.14e+313", "2048e+306", "0.6E-325", "-2.e345"]
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
# NOTE: Warning behavior varies for the parameters being passed in.
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert f"OverflowError converting to FloatType in column col{i+2}" in str(
w[i].message
)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
# Test some additional corner cases
fields = [
".0101E202",
"0.000000314E+314",
"1777E+305",
"-1799E+305",
"0.2e-323",
"5200e-327",
" 0.0000000000000000000001024E+330",
]
values = np.array(
[1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]
)
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert f"OverflowError converting to FloatType in column col{i+4}" in str(
w[i].message
)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get("use_fast_converter"):
fast_reader.update({"exponent_style": "A"})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = [
".0101D202",
"0.000000314d+314",
"1777+305",
"-1799E+305",
"0.2e-323",
"2500-327",
" 0.0000000000000000000001024Q+330",
]
with ctx as w:
t = ascii.read(
StringIO(" ".join(fields)),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
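# Illustrative sketch (not part of the test suite): out-of-range floats read
# through the public interface. Warnings are silenced only to keep the
# example quiet; the sample values are made up.
def _example_float_out_of_range():
    import warnings

    from astropy.io import ascii

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        t = ascii.read(["x y", "1e400 1e-400"], format="basic", fast_reader=True)
    # 1e400 overflows float64 and should come back as inf;
    # 1e-400 underflows and should come back as 0.0.
    return t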
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize(
"fast_reader",
[False, {"use_fast_converter": False}, {"use_fast_converter": True}],
)
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
Test parsing of fixed-format float64 numbers near range limits
(|~4.94e-324 to 1.7977e+308|) - within limit for full precision
(|~2.5e-307| for strtod C parser, factor 10 better for fast_converter)
exact numbers shall be returned; beyond that, an Overflow warning is raised.
Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.0e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader["parallel"] = parallel
if fast_reader.get("use_fast_converter"):
rtol = 1.0e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(
StringIO(99 * "0" + "." + D * "0" + "1"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
assert_almost_equal(t["col1"][0], 10.0 ** -(D + 1), rtol=rtol, atol=1.0e-324)
for D in 99, 202, 308:
t = ascii.read(
StringIO("1" + D * "0" + ".0"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
assert_almost_equal(t["col1"][0], 10.0**D, rtol=rtol, atol=1.0e-324)
# 0.0 is always exact (no Overflow warning)!
for s in "0.0", "0.0e+0", 399 * "0" + "." + 365 * "0":
t = ascii.read(
StringIO(s), format="no_header", guess=guess, fast_reader=fast_reader
)
assert t["col1"][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns() as warning_lines:
t = ascii.read(
StringIO("0." + 314 * "0" + "1"),
format="no_header",
guess=guess,
fast_reader=fast_reader,
)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f"Expected 0 or 1 warning, found {n_warns}"
if n_warns == 1:
assert (
"OverflowError converting to FloatType in column col1, possibly "
"resulting in degraded precision" in str(warning_lines[0].message)
)
assert_almost_equal(t["col1"][0], 1.0e-315, rtol=1.0e-10, atol=1.0e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f"{imax+2:d}"
text = f"P M S\n {imax:d} {imin:d} {huge:s}"
expected = Table([[imax], [imin], [huge]], names=("P", "M", "S"))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns() as w:
table = ascii.read(
text, format="basic", guess=guess, fast_reader={"parallel": parallel}
)
if not parallel:
assert len(w) == 1
assert (
"OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message)
)
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f"P M S\n000{imax:d} -0{-imin:d} 00{huge:s}"
expected = Table([[imax], [imin], ["00" + huge]], names=("P", "M", "S"))
with pytest.warns() as w:
table = ascii.read(
text, format="basic", guess=guess, fast_reader={"parallel": parallel}
)
if not parallel:
assert len(w) == 1
assert (
"OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message)
)
assert_table_equal(table, expected)
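# Illustrative sketch (not part of the test suite): integers too large for
# int64 read through the public interface. The sample value is made up.
def _example_int_out_of_range():
    import warnings

    from astropy.io import ascii

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        t = ascii.read(
            ["a", "123456789012345678901234567890"], format="basic", guess=False
        )
    # The column cannot be converted to int (and is not upcast to float),
    # so it should come back as a string column.
    return t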
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f"A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7"
expected = Table([[12.3, 10.0 * imax], [f"{imax:d}0", "45.6e7"]], names=("A", "B"))
with pytest.warns(
AstropyWarning,
match=r"OverflowError converting to "
r"IntType in column B, reverting to String",
):
table = ascii.read(text, format="basic", guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(
AstropyWarning,
match=r"OverflowError converting to "
r"IntType in column B, reverting to String",
):
table = ascii.read(text, format="basic", guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.0e-15
atol = 0.0
text = (
"A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n"
+ " 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309"
)
expc = Table(
[[1.0001e101, 0.42], [2, 0.5], [2.0e-103, 6.0e3], [3, 1.7e307]],
names=("A", "B", "C", "D"),
)
expstyles = {
"e": 6 * "E",
"D": ("D", "d", "d", "D", "d", "D"),
"Q": 3 * ("q", "Q"),
"Fortran": ("E", "0", "D", "Q", "d", "0"),
}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(
text.format(*(6 * "D")),
format="basic",
guess=guess,
fast_reader={
"use_fast_converter": False,
"parallel": parallel,
"exponent_style": "D",
},
)
assert "fast_reader: exponent_style requires use_fast_converter" in str(e.value)
# Enable multiprocessing and the fast converter iterate over
# all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(
text.format(*c),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": s},
)
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = (
"A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n "
+ "0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330"
)
table = ascii.read(
text, guess=guess, fast_reader={"parallel": parallel, "exponent_style": "A"}
)
assert_table_equal(table, expc, rtol=rtol, atol=atol)
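# Illustrative sketch (not part of the test suite): Fortran-style 'D'
# exponents read through the public interface. exponent_style requires the
# fast converter, so it is enabled explicitly. Sample data is made up.
def _example_fortran_exponent():
    from astropy.io import ascii

    t = ascii.read(
        ["x", "1.23D+04"],
        fast_reader={"exponent_style": "D", "use_fast_converter": True},
    )
    # The 'D' exponent should be parsed as a float, giving x[0] == 1.23e4.
    return t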
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {"basic": " ", "tab": "\t", "csv": ","}
header = ["S1", "F2", "S2", "F3", "S3", "F4", "F5", "S4", "I1", "F6", "F7"]
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
# fmt: off
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# fmt: on
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
format=f,
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "A"},
)
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats["bar"] = "|"
else:
formats = {"basic": " "}
for s in formats.values():
t2 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "a"},
)
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "use_fast_converter": True},
)
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "exponent_style": "D"},
)
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(
StringIO(s.join(header) + "\n" + s.join(fields)),
guess=guess,
fast_reader={"parallel": parallel, "use_fast_converter": False},
)
read_values = [col[0] for col in t5.itercols()]
if os.name == "nt":
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values in (vals_v, vals_e)
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent(
"""
a b
1 1.23D4
2 5.67D-8
"""
)[1:-1]
t1 = ascii.read(tabstr.split("\n"), fast_reader={"exponent_style": "D"})
assert t1["b"].dtype.kind == "f"
tabrdb = dedent(
"""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
"""
)[1:-1]
t2 = ascii.read(
tabrdb.split("\n"), format="rdb", fast_reader={"exponent_style": "fortran"}
)
assert t2["b"].dtype.kind == "f"
tabrst = dedent(
"""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
"""
)[1:-1]
t3 = ascii.read(tabrst.split("\n"), format="rst")
assert t3["b"].dtype.kind == "f"
t4 = ascii.read(tabrst.split("\n"), guess=True)
assert t4["b"].dtype.kind == "f"
# In the special case of fast_converter=True (the default),
# incompatibility is ignored
t5 = ascii.read(tabrst.split("\n"), format="rst", fast_reader=True)
assert t5["b"].dtype.kind == "f"
with pytest.raises(ParameterError):
ascii.read(tabrst.split("\n"), format="rst", guess=False, fast_reader="force")
with pytest.raises(ParameterError):
ascii.read(
tabrst.split("\n"),
format="rst",
guess=False,
fast_reader={"use_fast_converter": False},
)
tabrst = tabrst.replace("E", "D")
with pytest.raises(ParameterError):
ascii.read(
tabrst.split("\n"),
format="rst",
guess=False,
fast_reader={"exponent_style": "D"},
)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize(
"fast_reader", [{"exponent_style": "D"}, {"exponent_style": "A"}]
)
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get("exponent_style", "E")
fields = ["10.1D+199", "3.14d+313", "2048d+306", "0.6D-325", "-2.d345"]
ascii.read(StringIO(" ".join(fields)), guess=guess, fast_reader=fast_reader)
assert fast_reader.get("exponent_style", None) == expstyle
@pytest.mark.parametrize(
"fast_reader", [False, {"parallel": True}, {"parallel": False}]
)
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta["comments"] == ["comment 1", "comment 2"]
assert len(t) == 0
assert t.colnames == ["col1", "col2"]
@pytest.mark.parametrize(
"fast_reader", [{"use_fast_converter": True}, {"exponent_style": "A"}]
)
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table["A"].dtype.kind, "f")
assert table["B"].dtype.kind in ("S", "U")
assert_equal(table["C"].dtype.kind, "i")
assert_equal(table["D"].dtype.kind, "f")
assert table["E"].dtype.kind in ("S", "U")
assert table["F"].dtype.kind in ("S", "U")
assert table["G"].dtype.kind in ("S", "U")
assert table["H"].dtype.kind in ("S", "U")
@pytest.mark.parametrize("delimiter", ["\n", "\r"])
@pytest.mark.parametrize("fast_reader", [False, True, "force"])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == "\r":
eol = "\n"
else:
eol = "\r"
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c", f"1{delimiter} '2' {delimiter} 3.0"]
t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ["a", "b", "c"]
assert len(t1) == len(t2) == 1
assert t1["b"].dtype.kind in ("S", "U")
assert t2["b"].dtype.kind in ("S", "U")
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format("|", eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1["b"].dtype.kind, "i")
@pytest.mark.parametrize("delimiter", [" ", "|", "\n", "\r"])
@pytest.mark.parametrize("fast_reader", [False, True, "force"])
def test_single_line_string(delimiter, fast_reader):
"""
String input without a newline character is interpreted as a filename,
unless it is an element of an iterable. Maybe not logical, but test that it is
at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=("col1", "col2", "col3"))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ("\r", "\n"):
t1 = ascii.read(
text, format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
assert_table_equal(t1, expected)
else:
# Windows raises OSError, but not the other OSes.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(
text, format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
t2 = ascii.read(
[text], format="no_header", delimiter=delimiter, fast_reader=fast_reader
)
assert_table_equal(t2, expected)
5311396e49e03a2649f52907bb2e3aa506c1653509da7550a3d42d3499775d22
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import locale
import pathlib
import platform
import re
from collections import OrderedDict
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy import table
from astropy.io import ascii
from astropy.io.ascii import core
from astropy.io.ascii.core import convert_numpy
from astropy.io.ascii.ui import _probably_html, get_read_trace
from astropy.table import MaskedColumn, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
from astropy.utils.data import get_pkg_data_path
from astropy.utils.exceptions import AstropyWarning
# setup/teardown function to have the tests run in the correct directory
from .common import setup_function # noqa: F401
from .common import teardown_function # noqa: F401
from .common import assert_almost_equal, assert_equal, assert_true
def asciiIO(x):
return BytesIO(x.encode("ascii"))
@pytest.fixture
def home_is_data(monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path("data")
# For Unix
monkeypatch.setenv("HOME", path)
# For Windows
monkeypatch.setenv("USERPROFILE", path)
@pytest.mark.parametrize(
"fast_reader",
[True, False, {"use_fast_converter": False}, {"use_fast_converter": True}, "force"],
)
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = "U"
with pytest.warns(
AstropyWarning, match="OverflowError converting to IntType in column a"
):
dat = ascii.read(
["a", "1" * 10000], format="basic", fast_reader=fast_reader, guess=False
)
assert dat["a"].dtype.kind == expected_kind
def test_read_specify_converters_with_names():
"""
Exact example from #9701: When using ascii.read with both the names and
converters arguments, the converters dictionary ignores the user-supplied
names and requires that you know the guessed names.
"""
csv_text = ["a,b,c", "1,2,3", "4,5,6"]
names = ["A", "B", "C"]
converters = {
"A": [ascii.convert_numpy(float)],
"B": [ascii.convert_numpy(int)],
"C": [ascii.convert_numpy(str)],
}
t = ascii.read(csv_text, format="csv", names=names, converters=converters)
assert t["A"].dtype.kind == "f"
assert t["B"].dtype.kind == "i"
assert t["C"].dtype.kind == "U"
def test_read_remove_and_rename_columns():
csv_text = ["a,b,c", "1,2,3", "4,5,6"]
reader = ascii.get_reader(Reader=ascii.Csv)
reader.read(csv_text)
header = reader.header
with pytest.raises(KeyError, match="Column NOT-EXIST does not exist"):
header.remove_columns(["NOT-EXIST"])
header.remove_columns(["c"])
assert header.colnames == ("a", "b")
header.rename_column("a", "aa")
assert header.colnames == ("aa", "b")
with pytest.raises(KeyError, match="Column NOT-EXIST does not exist"):
header.rename_column("NOT-EXIST", "aa")
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(["1,2", "3,4"], names=("a", "b"))
assert len(dat) == 2
assert dat.colnames == ["a", "b"]
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(["c,d", "3,4"], names=("a", "b"))
assert len(dat) == 1
assert dat.colnames == ["a", "b"]
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(["c d", "e f"], names=("a", "b"))
assert len(dat) == 1
assert dat.colnames == ["a", "b"]
def test_guess_with_format_arg():
"""
When the format or Reader is explicitly given then disable the
strict column name checking in guessing.
"""
dat = ascii.read(["1,2", "3,4"], format="basic")
assert len(dat) == 1
assert dat.colnames == ["1", "2"]
dat = ascii.read(["1,2", "3,4"], names=("a", "b"), format="basic")
assert len(dat) == 1
assert dat.colnames == ["a", "b"]
dat = ascii.read(["1,2", "3,4"], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ["1", "2"]
dat = ascii.read(["1,2", "3,4"], names=("a", "b"), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ["a", "b"]
# For good measure check the same in the unified I/O interface
dat = Table.read(["1,2", "3,4"], format="ascii.basic")
assert len(dat) == 1
assert dat.colnames == ["1", "2"]
dat = Table.read(["1,2", "3,4"], format="ascii.basic", names=("a", "b"))
assert len(dat) == 1
assert dat.colnames == ["a", "b"]
def test_guess_with_delimiter_arg():
"""
When the delimiter is explicitly given then do not try others in guessing.
"""
fields = ["10.1E+19", "3.14", "2048", "-23"]
values = [1.01e20, 3.14, 2048, -23]
# Default guess should recognise CSV with optional spaces
t0 = ascii.read(asciiIO(", ".join(fields)), guess=True)
for n, v in zip(t0.colnames, values):
assert t0[n][0] == v
# Forcing space as delimiter produces type str columns ('10.1E+19,')
t1 = ascii.read(asciiIO(", ".join(fields)), guess=True, delimiter=" ")
for n, v in zip(t1.colnames[:-1], fields[:-1]):
assert t1[n][0] == v + ","
def test_reading_mixed_delimiter_tabs_spaces():
# Regression test for https://github.com/astropy/astropy/issues/6770
dat = ascii.read("1 2\t3\n1 2\t3", format="no_header", names=list("abc"))
assert len(dat) == 2
Table.read(["1 2\t3", "1 2\t3"], format="ascii.no_header", names=["a", "b", "c"])
assert len(dat) == 2
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
# CParser only uses columns in `names` and thus reports mismatch in num_col
with pytest.raises(ascii.InconsistentTableError):
ascii.read(["c d", "e f"], names=("a",), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
@pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"])
def test_read_all_files(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get("skip"):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if "tilde" in path_format:
if "str" in path_format:
testfile["name"] = "~/" + testfile["name"][5:]
else:
testfile["name"] = pathlib.Path("~/", testfile["name"][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile["opts"].copy()
if "guess" not in test_opts:
test_opts["guess"] = guess
if (
"Reader" in test_opts
and f"fast_{test_opts['Reader']._format_name}" in core.FAST_CLASSES
): # has fast version
if "Inputter" not in test_opts: # fast reader doesn't allow this
test_opts["fast_reader"] = fast_reader
table = ascii.read(testfile["name"], **test_opts)
assert_equal(table.dtype.names, testfile["cols"])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile["nrows"])
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
@pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"])
def test_read_all_files_via_table(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get("skip"):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if "tilde" in path_format:
if "str" in path_format:
testfile["name"] = "~/" + testfile["name"][5:]
else:
testfile["name"] = pathlib.Path("~/", testfile["name"][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile["opts"].copy()
if "guess" not in test_opts:
test_opts["guess"] = guess
if "Reader" in test_opts:
format = f"ascii.{test_opts['Reader']._format_name}"
del test_opts["Reader"]
else:
format = "ascii"
if f"fast_{format}" in core.FAST_CLASSES:
test_opts["fast_reader"] = fast_reader
table = Table.read(testfile["name"], format=format, **test_opts)
assert_equal(table.dtype.names, testfile["cols"])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile["nrows"])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get("skip"):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if not testfile["opts"].get("guess", True):
continue
print(f"\n\n******** READING {testfile['name']}")
for filter_read_opts in (["Reader", "delimiter", "quotechar"], []):
# Copy read options except for those in filter_read_opts
guess_opts = {
k: v for k, v in testfile["opts"].items() if k not in filter_read_opts
}
table = ascii.read(testfile["name"], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile["cols"])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile["nrows"])
def test_validate_read_kwargs():
lines = ["a b", "1 2", "3 4"]
# Check that numpy integers are allowed
out = ascii.read(lines, data_start=np.int16(2))
assert np.all(out["a"] == [3])
with pytest.raises(
TypeError,
match=r"read\(\) argument 'data_end' must be a "
r"<class 'int'> object, "
r"got <class 'str'> instead",
):
ascii.read(lines, data_end="needs integer")
with pytest.raises(
TypeError,
match=r"read\(\) argument 'fill_include_names' must "
r"be a list-like object, got <class 'str'> instead",
):
ascii.read(lines, fill_include_names="ID")
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read("data/daophot2.dat", Reader=ascii.Daophot)
for col in table.itercols():
# Four columns have all INDEF values and are masked, rest are normal Column
if col.name in ("OTIME", "MAG", "MERR", "XAIRMASS"):
assert np.all(col.mask)
else:
assert not hasattr(col, "mask")
def test_daophot_types():
"""
Test specific data types which are different from what would be
inferred automatically based only on data values. The DAOphot reader uses
the header information to assign types.
"""
table = ascii.read("data/daophot2.dat", Reader=ascii.Daophot)
assert table["LID"].dtype.char in "fd" # float or double
assert table["MAG"].dtype.char in "fd" # even without any data values
assert (
table["PIER"].dtype.char in "US"
) # string (data values are consistent with int)
assert table["ID"].dtype.char in "il" # int or long
def test_daophot_header_keywords():
table = ascii.read("data/daophot.dat", Reader=ascii.Daophot)
expected_keywords = (
("NSTARFILE", "test.nst.1", "filename", "%-23s"),
("REJFILE", '"hello world"', "filename", "%-23s"),
("SCALE", "1.", "units/pix", "%-23.7g"),
)
keywords = table.meta["keywords"] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword["value"], value)
assert_equal(keyword["units"], units)
assert_equal(keyword["format"], format_)
def test_daophot_multiple_aperture():
table = ascii.read("data/daophot3.dat", Reader=ascii.Daophot)
assert "MAG5" in table.colnames # MAG5 is one of the newly created column names
assert table["MAG5"][4] == 22.13 # A sample entry in daophot3.dat file
assert table["MERR2"][0] == 1.171
assert np.all(
table["RAPERT5"] == 23.3
) # assert all the 5th apertures are same 23.3
def test_daophot_multiple_aperture2():
table = ascii.read("data/daophot4.dat", Reader=ascii.Daophot)
assert "MAG15" in table.colnames # MAG15 is one of the newly created column name
assert table["MAG15"][1] == -7.573 # A sample entry in daophot4.dat file
assert table["MERR2"][0] == 0.049
assert np.all(table["RAPERT5"] == 5.0) # assert all the 5th apertures are same 5.0
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read(
"data/no_data_without_header.dat",
Reader=ascii.NoHeader,
guess=False,
fast_reader=fast_reader,
)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read("data/simple.txt", guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read("data/bad.txt", fast_reader=fast_reader)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read("data/simple5.txt", delimiter="|", fast_reader=fast_reader)
def test_missing_file():
with pytest.raises(OSError):
ascii.read("does_not_exist")
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_set_names(fast_reader):
names = ("c1", "c2", "c3", "c4", "c5", "c6")
data = ascii.read(
"data/simple3.txt", names=names, delimiter="|", fast_reader=fast_reader
)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_set_include_names(fast_reader):
names = ("c1", "c2", "c3", "c4", "c5", "c6")
include_names = ("c1", "c3")
data = ascii.read(
"data/simple3.txt",
names=names,
include_names=include_names,
delimiter="|",
fast_reader=fast_reader,
)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_set_exclude_names(fast_reader):
exclude_names = ("Y", "object")
data = ascii.read(
"data/simple3.txt",
exclude_names=exclude_names,
delimiter="|",
fast_reader=fast_reader,
)
assert_equal(data.dtype.names, ("obsid", "redshift", "X", "rad"))
def test_include_names_daophot():
include_names = ("ID", "MAG", "PIER")
data = ascii.read("data/daophot.dat", include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ("ID", "YCENTER", "MERR", "NITER", "CHI", "PERROR")
data = ascii.read("data/daophot.dat", exclude_names=exclude_names)
assert_equal(data.dtype.names, ("XCENTER", "MAG", "MSKY", "SHARPNESS", "PIER"))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r"^\| | \|$", re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub("", x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter="|")
reader.inputter.process_lines = process_lines
data = reader.read("data/bars_at_ends.txt")
assert_equal(data.dtype.names, ("obsid", "redshift", "X", "Y", "object", "rad"))
assert_equal(len(data), 3)
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r"^\|\s*", "", line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter="|")
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read("data/nls1_stackinfo.dbout")
cols = get_testfiles("data/nls1_stackinfo.dbout")["cols"]
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = "data/test4.dat"
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile["cols"])
assert_equal(len(data), testfile["nrows"])
assert_almost_equal(data.field("zabs1.nh")[2], 0.0839710433091)
assert_almost_equal(data.field("p1.gamma")[2], 1.25997502704)
assert_almost_equal(data.field("p1.ampl")[2], 0.000696444029148)
assert_equal(data.field("statname")[2], "chi2modvar")
assert_almost_equal(data.field("statval")[2], 497.56468441)
def test_start_end():
data = ascii.read("data/test5.dat", header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field("statname")[0], "chi2xspecvar")
assert_equal(data.field("statname")[-1], "chi2gehrels")
def test_set_converters():
converters = {
"zabs1.nh": [ascii.convert_numpy("int32"), ascii.convert_numpy("float32")],
"p1.gamma": [ascii.convert_numpy("str")],
}
data = ascii.read("data/test4.dat", converters=converters)
assert_equal(str(data["zabs1.nh"].dtype), "float32")
assert_equal(data["p1.gamma"][0], "1.26764500000")
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_from_string(fast_reader):
f = "data/simple.txt"
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile["opts"])
assert_equal(data.dtype.names, testfile["cols"])
assert_equal(len(data), testfile["nrows"])
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_from_filelike(fast_reader):
f = "data/simple.txt"
testfile = get_testfiles(f)[0]
with open(f, "rb") as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile["opts"])
assert_equal(data.dtype.names, testfile["cols"])
assert_equal(len(data), testfile["nrows"])
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_from_lines(fast_reader):
f = "data/simple.txt"
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile["opts"])
assert_equal(data.dtype.names, testfile["cols"])
assert_equal(len(data), testfile["nrows"])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read("data/apostrophe.rdb")
assert_equal(table.comment_lines, ["# first comment", " # second comment"])
assert_equal(data.meta["comments"], ["first comment", "second comment"])
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_fill_values(fast_reader):
f = "data/fill_values.txt"
testfile = get_testfiles(f)
data = ascii.read(
f, fill_values=("a", "1"), fast_reader=fast_reader, **testfile["opts"]
)
assert_true((data["a"].mask == [False, True]).all())
assert_true((data["a"] == [1, 1]).all())
assert_true((data["b"].mask == [False, True]).all())
assert_true((data["b"] == [2, 1]).all())
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_fill_values_col(fast_reader):
f = "data/fill_values.txt"
testfile = get_testfiles(f)
data = ascii.read(
f, fill_values=("a", "1", "b"), fast_reader=fast_reader, **testfile["opts"]
)
check_fill_values(data)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_fill_values_include_names(fast_reader):
f = "data/fill_values.txt"
testfile = get_testfiles(f)
data = ascii.read(
f,
fill_values=("a", "1"),
fast_reader=fast_reader,
fill_include_names=["b"],
**testfile["opts"],
)
check_fill_values(data)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_fill_values_exclude_names(fast_reader):
f = "data/fill_values.txt"
testfile = get_testfiles(f)
data = ascii.read(
f,
fill_values=("a", "1"),
fast_reader=fast_reader,
fill_exclude_names=["a"],
**testfile["opts"],
)
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation"""
assert not hasattr(data["a"], "mask")
assert_true((data["a"] == ["1", "a"]).all())
assert_true((data["b"].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data["b"] == [2, -999]).all())
data["b"].mask = False # explicitly unmask for comparison
assert_true((data["b"] == [2, 1]).all())
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_fill_values_list(fast_reader):
f = "data/fill_values.txt"
testfile = get_testfiles(f)
data = ascii.read(
f,
fill_values=[("a", "42"), ("1", "42", "a")],
fast_reader=fast_reader,
**testfile["opts"],
)
data["a"].mask = False # explicitly unmask for comparison
assert_true((data["a"] == [42, 42]).all())
def test_masking_Cds_Mrt():
f = "data/cds.dat" # Tested for CDS and MRT
for testfile in get_testfiles(f):
data = ascii.read(f, **testfile["opts"])
assert_true(data["AK"].mask[0])
assert not hasattr(data["Fit"], "mask")
def test_null_Ipac():
f = "data/ipac.dat"
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile["opts"])
mask = np.array(
[(True, False, True, False, True), (False, False, False, False, False)],
dtype=[
("ra", "|b1"),
("dec", "|b1"),
("sai", "|b1"),
("v2", "|b1"),
("sptype", "|b1"),
],
)
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict(
(
("intval", 1),
("floatval", 2.3e3),
("date", "Wed Sp 20 09:48:36 1995"),
("key_continue", "IPAC keywords can continue across lines"),
)
)
comments = ["This is an example of a valid comment"]
f = "data/ipac.dat"
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile["opts"])
assert data.meta["keywords"].keys() == keywords.keys()
for data_kv, kv in zip(data.meta["keywords"].values(), keywords.values()):
assert data_kv["value"] == kv
assert data.meta["comments"] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read("data/space_delim_no_header.dat", delimiter=",", guess=True)
assert data.dtype.names == ("1 3.4 hello",)
assert len(data) == 1
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconsistent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_default_missing(fast_reader):
"""
Read a table with empty values and ensure that corresponding entries are masked
"""
table = "\n".join( # noqa: FLY002
[
"a,b,c,d",
"1,3,,",
"2, , 4.0 , ss ",
]
)
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is False
assert dat.pformat() == [
" a b c d ",
"--- --- --- ---",
" 1 3 -- --",
" 2 -- 4.0 ss",
]
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [" a ", "---", " --"]
assert dat["a"].dtype.kind == "i"
# Same test with a fixed width reader
table = "\n".join( # noqa: FLY002
[
" a b c d ",
"--- --- --- ---",
" 1 3 ",
" 2 4.0 ss",
]
)
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is False
assert dat.pformat() == [
" a b c d ",
"--- --- --- ---",
" 1 3 -- --",
" 2 -- 4.0 ss",
]
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [
" a b c d ",
"--- --- --- ---",
" 1 3 ",
" 2 4.0 ss",
]
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [
" a b c d ",
"--- --- --- ---",
" 1 3 ",
" 2 4.0 ss",
]
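# Editor's note: as asserted above, passing ``fill_values=None`` or an empty list
# both disable the default masking of empty fields, so the blank entries are read
# back literally instead of being masked.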
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/apostrophe.rdb",
"nrows": 2,
"opts": {"Reader": ascii.Rdb},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/apostrophe.tab",
"nrows": 2,
"opts": {"Reader": ascii.Tab},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds.dat",
"nrows": 1,
"opts": {"Reader": ascii.Cds},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds.dat",
"nrows": 1,
"opts": {"Reader": ascii.Mrt},
},
# Test malformed CDS file (issues #2241 #467)
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/cds_malformed.dat",
"nrows": 1,
"opts": {"Reader": ascii.Cds, "data_start": "guess"},
},
{
"cols": ("a", "b", "c"),
"name": "data/commented_header.dat",
"nrows": 2,
"opts": {"Reader": ascii.CommentedHeader},
},
{
"cols": ("a", "b", "c"),
"name": "data/commented_header2.dat",
"nrows": 2,
"opts": {"Reader": ascii.CommentedHeader, "header_start": -1},
},
{
"cols": ("col1", "col2", "col3", "col4", "col5"),
"name": "data/continuation.dat",
"nrows": 2,
"opts": {
"Inputter": ascii.ContinuationLinesInputter,
"Reader": ascii.NoHeader,
},
},
{
"cols": (
"ID",
"XCENTER",
"YCENTER",
"MAG",
"MERR",
"MSKY",
"NITER",
"SHARPNESS",
"CHI",
"PIER",
"PERROR",
),
"name": "data/daophot.dat",
"nrows": 2,
"opts": {"Reader": ascii.Daophot},
},
{
"cols": (
"NUMBER",
"FLUX_ISO",
"FLUXERR_ISO",
"VALU-ES",
"VALU-ES_1",
"FLAG",
),
"name": "data/sextractor.dat",
"nrows": 3,
"opts": {"Reader": ascii.SExtractor},
},
{
"cols": ("ra", "dec", "sai", "v2", "sptype"),
"name": "data/ipac.dat",
"nrows": 2,
"opts": {"Reader": ascii.Ipac},
},
{
"cols": (
"col0",
"objID",
"osrcid",
"xsrcid",
"SpecObjID",
"ra",
"dec",
"obsid",
"ccdid",
"z",
"modelMag_i",
"modelMagErr_i",
"modelMag_r",
"modelMagErr_r",
"expo",
"theta",
"rad_ecf_39",
"detlim90",
"fBlim90",
),
"name": "data/nls1_stackinfo.dbout",
"nrows": 58,
"opts": {"data_start": 2, "delimiter": "|", "guess": False},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/no_data_cds.dat",
"nrows": 0,
"opts": {"Reader": ascii.Cds},
},
{
"cols": (
"Index",
"RAh",
"RAm",
"RAs",
"DE-",
"DEd",
"DEm",
"DEs",
"Match",
"Class",
"AK",
"Fit",
),
"name": "data/no_data_cds.dat",
"nrows": 0,
"opts": {"Reader": ascii.Mrt},
},
{
"cols": (
"ID",
"XCENTER",
"YCENTER",
"MAG",
"MERR",
"MSKY",
"NITER",
"SHARPNESS",
"CHI",
"PIER",
"PERROR",
),
"name": "data/no_data_daophot.dat",
"nrows": 0,
"opts": {"Reader": ascii.Daophot},
},
{
"cols": ("NUMBER", "FLUX_ISO", "FLUXERR_ISO", "VALUES", "VALUES_1", "FLAG"),
"name": "data/no_data_sextractor.dat",
"nrows": 0,
"opts": {"Reader": ascii.SExtractor},
},
{
"cols": ("ra", "dec", "sai", "v2", "sptype"),
"name": "data/no_data_ipac.dat",
"nrows": 0,
"opts": {"Reader": ascii.Ipac},
},
{
"cols": ("ra", "v2"),
"name": "data/ipac.dat",
"nrows": 2,
"opts": {"Reader": ascii.Ipac, "include_names": ["ra", "v2"]},
},
{
"cols": ("a", "b", "c"),
"name": "data/no_data_with_header.dat",
"nrows": 0,
"opts": {},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/short.rdb",
"nrows": 7,
"opts": {"Reader": ascii.Rdb},
},
{
"cols": ("agasc_id", "n_noids", "n_obs"),
"name": "data/short.tab",
"nrows": 7,
"opts": {"Reader": ascii.Tab},
},
{
"cols": ("test 1a", "test2", "test3", "test4"),
"name": "data/simple.txt",
"nrows": 2,
"opts": {"quotechar": "'"},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 1,
"opts": {"quotechar": "'", "header_start": 1, "data_start": 2},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 1,
"opts": {"quotechar": "'", "header_start": 1},
},
{
"cols": ("top1", "top2", "top3", "top4"),
"name": "data/simple.txt",
"nrows": 2,
"opts": {"quotechar": "'", "header_start": 1, "data_start": 1},
},
{
"cols": ("obsid", "redshift", "X", "Y", "object", "rad"),
"name": "data/simple2.txt",
"nrows": 3,
"opts": {"delimiter": "|"},
},
{
"cols": ("obsid", "redshift", "X", "Y", "object", "rad"),
"name": "data/simple3.txt",
"nrows": 2,
"opts": {"delimiter": "|"},
},
{
"cols": ("col1", "col2", "col3", "col4", "col5", "col6"),
"name": "data/simple4.txt",
"nrows": 3,
"opts": {"Reader": ascii.NoHeader, "delimiter": "|"},
},
{
"cols": ("col1", "col2", "col3"),
"name": "data/space_delim_no_header.dat",
"nrows": 2,
"opts": {"Reader": ascii.NoHeader},
},
{
"cols": ("col1", "col2", "col3"),
"name": "data/space_delim_no_header.dat",
"nrows": 2,
"opts": {"Reader": ascii.NoHeader, "header_start": None},
},
{
"cols": ("obsid", "offset", "x", "y", "name", "oaa"),
"name": "data/space_delim_blank_lines.txt",
"nrows": 3,
"opts": {},
},
{
"cols": ("zabs1.nh", "p1.gamma", "p1.ampl", "statname", "statval"),
"name": "data/test4.dat",
"nrows": 9,
"opts": {},
},
{
"cols": ("a", "b", "c"),
"name": "data/fill_values.txt",
"nrows": 2,
"opts": {"delimiter": ","},
},
{
"name": "data/whitespace.dat",
"cols": ("quoted colname with tab\tinside", "col2", "col3"),
"nrows": 2,
"opts": {"delimiter": r"\s"},
},
{
"name": "data/simple_csv.csv",
"cols": ("a", "b", "c"),
"nrows": 2,
"opts": {"Reader": ascii.Csv},
},
{
"name": "data/simple_csv_missing.csv",
"cols": ("a", "b", "c"),
"nrows": 2,
"skip": True,
"opts": {"Reader": ascii.Csv},
},
{
"cols": ("cola", "colb", "colc"),
"name": "data/latex1.tex",
"nrows": 2,
"opts": {"Reader": ascii.Latex},
},
{
"cols": ("Facility", "Id", "exposure", "date"),
"name": "data/latex2.tex",
"nrows": 3,
"opts": {"Reader": ascii.AASTex},
},
{
"cols": ("cola", "colb", "colc"),
"name": "data/latex3.tex",
"nrows": 2,
"opts": {"Reader": ascii.Latex},
},
{
"cols": ("Col1", "Col2", "Col3", "Col4"),
"name": "data/fixed_width_2_line.txt",
"nrows": 2,
"opts": {"Reader": ascii.FixedWidthTwoLine},
},
]
try:
import bs4 # noqa: F401
testfiles.append(
{
"cols": ("Column 1", "Column 2", "Column 3"),
"name": "data/html.html",
"nrows": 3,
"opts": {"Reader": ascii.HTML},
}
)
except ImportError:
pass
if name is not None:
# If there are multiple matches then return a list, else return just
# the one match.
out = [x for x in testfiles if x["name"] == name]
if len(out) == 1:
out = out[0]
else:
out = testfiles
return out
def test_header_start_exception():
"""Check certain Readers throw an exception if ``header_start`` is set
For certain Readers it does not make sense to set the ``header_start``, they
throw an exception if you try.
This was implemented in response to issue #885.
"""
for readerclass in [
ascii.NoHeader,
ascii.SExtractor,
ascii.Ipac,
ascii.BaseReader,
ascii.FixedWidthNoHeader,
ascii.Cds,
ascii.Mrt,
ascii.Daophot,
]:
with pytest.raises(ValueError):
ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ["# a, b", "1, 2", "3, 4"]
t = ascii.read(lines)
assert t.colnames == ["a", "b"]
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(["a b", "1 2"], names=["b", "a"], fast_reader=fast_reader)
assert t.colnames == ["b", "a"]
def test_sextractor_units():
"""
Make sure that the SExtractor reader correctly inputs descriptions and units.
"""
table = ascii.read("data/sextractor2.dat", Reader=ascii.SExtractor, guess=False)
expected_units = [
None,
Unit("pix"),
Unit("pix"),
Unit("mag"),
Unit("mag"),
None,
Unit("pix**2"),
Unit("m**(-6)"),
Unit("mag * arcsec**(-2)"),
]
expected_descrs = [
"Running object number",
"Windowed position estimate along x",
"Windowed position estimate along y",
"Kron-like elliptical aperture magnitude",
"RMS error for AUTO magnitude",
"Extraction flags",
None,
"Barycenter position along MAMA x axis",
"Peak surface brightness above background",
]
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read("data/sextractor3.dat", Reader=ascii.SExtractor, guess=False)
expected_columns = [
"X_IMAGE",
"Y_IMAGE",
"ALPHA_J2000",
"DELTA_J2000",
"MAG_AUTO",
"MAGERR_AUTO",
"MAG_APER",
"MAG_APER_1",
"MAG_APER_2",
"MAG_APER_3",
"MAG_APER_4",
"MAG_APER_5",
"MAG_APER_6",
"MAGERR_APER",
"MAGERR_APER_1",
"MAGERR_APER_2",
"MAGERR_APER_3",
"MAGERR_APER_4",
"MAGERR_APER_5",
"MAGERR_APER_6",
]
expected_units = [
Unit("pix"),
Unit("pix"),
Unit("deg"),
Unit("deg"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
Unit("mag"),
]
expected_descrs = (
[
"Object position along x",
None,
"Right ascension of barycenter (J2000)",
"Declination of barycenter (J2000)",
"Kron-like elliptical aperture magnitude",
"RMS error for AUTO magnitude",
]
+ ["Fixed aperture magnitude vector"] * 7
+ ["RMS error vector for fixed aperture mag."] * 7
)
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ["abc"]
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
    Check that the Csv reader does not ignore lines starting with the # comment
    character, which is defined as a comment character for most Basic readers.
"""
t = ascii.read(["#a,b", "1,2", "#3,4"], format="csv")
assert t.colnames == ["#a", "b"]
assert len(t) == 2
assert t["#a"][1] == "#3"
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(["#comment1", "# comment2 \t", "a,b,c", "1,2,3"])
assert t.colnames == ["a", "b", "c"]
assert t.meta["comments"] == ["comment1", "comment2"]
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read("asfdasdf\n1 2 3", format="basic")
assert "** To figure out why the table did not read, use guess=False and" in str(
err.value
)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read("asfdasdf\n1 2 3", format="ipac")
assert (
"At least one header line beginning and ending with delimiter required"
in str(err.value)
)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read(
"asfdasdf\n1 2 3",
format="basic",
quotechar='"',
delimiter=" ",
fast_reader=False,
)
assert "Number of header columns (1) inconsistent with data columns (3)" in str(
err.value
)
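# Editor's note: an illustrative sketch (not part of the original suite) showing
# the debugging advice in the first error message above -- re-reading with
# guess=False makes the requested reader report its own, more specific error.
def test_guess_fail_debug_example():
    with pytest.raises(ValueError, match="Number of header columns"):
        ascii.read("asfdasdf\n1 2 3", format="basic", guess=False, fast_reader=False)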
@pytest.mark.xfail(not HAS_BZ2, reason="requires bz2")
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
"""
with open("data/ipac.dat.bz2", "rb") as fd:
t = ascii.read(fd)
assert t.colnames == ["ra", "dec", "sai", "v2", "sptype"]
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = "\n".join( # noqa: FLY002
[
"a,b,c,d",
"1,3,1.11,1",
"2, 2, 4.0 , ss ",
]
)
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = [
"| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|",
"| r | rE | rea | real | D | do | dou | f | i | l | da| c |",
" 1 2 3 4 5 6 7 8 9 10 11 12 ",
]
dat = ascii.read(lines, format="ipac")
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == "f"
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == "i"
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ("U", "S")
def test_almost_but_not_quite_daophot():
"""Regression test for #3319.
    This table looks so close to a daophot table that the daophot reader gets
    quite far before it fails with an AttributeError.
    Note that this table will actually be read as a CommentedHeader table with
the columns ['some', 'header', 'info'].
"""
lines = [
"# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9",
]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize("fast", [False, "force"])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected with header_start
at different positions, and that the table round-trips.
"""
comments = ["comment 1", "comment 2", "comment 3"]
lines = ["# a b", "# comment 1", "# comment 2", "# comment 3", "1 2", "3 4"]
dat = ascii.read(lines, format="commented_header", fast_reader=fast)
assert dat.meta["comments"] == comments
assert dat.colnames == ["a", "b"]
out = StringIO()
ascii.write(dat, out, format="commented_header", fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines.insert(1, lines.pop(0))
dat = ascii.read(lines, format="commented_header", header_start=1, fast_reader=fast)
assert dat.meta["comments"] == comments
assert dat.colnames == ["a", "b"]
lines.insert(2, lines.pop(1))
dat = ascii.read(lines, format="commented_header", header_start=2, fast_reader=fast)
assert dat.meta["comments"] == comments
assert dat.colnames == ["a", "b"]
dat = ascii.read(
lines, format="commented_header", header_start=-2, fast_reader=fast
)
assert dat.meta["comments"] == comments
assert dat.colnames == ["a", "b"]
lines.insert(3, lines.pop(2))
dat = ascii.read(
lines, format="commented_header", header_start=-1, fast_reader=fast
)
assert dat.meta["comments"] == comments
assert dat.colnames == ["a", "b"]
lines = ["# a b", "1 2", "3 4"]
dat = ascii.read(lines, format="commented_header", fast_reader=fast)
assert "comments" not in dat.meta
assert dat.colnames == ["a", "b"]
def test_probably_html(home_is_data):
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for tabl0 in (
"data/html.html",
"~/html.html",
"http://blah.com/table.html",
"https://blah.com/table.html",
"file://blah/table.htm",
"ftp://blah.com/table.html",
"file://blah.com/table.htm",
" <! doctype html > hello world",
"junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk",
[
"junk < table baz>",
" <tr foo >",
" <td bar> ",
"</td> </tr>",
"</table> junk",
],
(" <! doctype html > ", " hello world"),
):
assert _probably_html(tabl0) is True
for tabl0 in (
"data/html.htms",
"Xhttp://blah.com/table.html",
" https://blah.com/table.htm",
"fole://blah/table.htm",
" < doctype html > hello world",
"junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk",
[
"junk < table baz>",
" <t foo >",
" <td bar> ",
"</td> </tr>",
"</table> junk",
],
(" <! doctype htm > ", " hello world"),
[[1, 2, 3]],
):
assert _probably_html(tabl0) is False
@pytest.mark.parametrize("fast_reader", [True, False, "force"])
def test_data_header_start(fast_reader):
tests = [
(
[
"# comment",
"",
" ",
"skip this line", # line 0
"a b", # line 1
"1 2",
], # line 2
[{"header_start": 1}, {"header_start": 1, "data_start": 2}],
),
(
[
"# comment",
"",
" \t",
"skip this line", # line 0
"a b", # line 1
"",
" \t",
"skip this line", # line 2
"1 2",
], # line 3
[{"header_start": 1, "data_start": 3}],
),
(
[
"# comment",
"",
" ",
"a b", # line 0
"",
" ",
"skip this line", # line 1
"1 2",
], # line 2
[{"header_start": 0, "data_start": 2}, {"data_start": 2}],
),
]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(
lines, format="basic", fast_reader=fast_reader, guess=True, **kwargs
)
assert t.colnames == ["a", "b"]
assert len(t) == 1
assert np.all(t["a"] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]["kwargs"]["Reader"] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic
)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format="basic")
assert "No header line found" in str(err.value)
table = BytesIO()
t = ascii.read(table, guess=False, fast_reader=True, format="fast_basic")
assert not t and t.as_array().size == 0
# Put a single line of column names but with no newline
for kwargs in [
{},
{"guess": False, "fast_reader": False, "format": "basic"},
{"guess": False, "fast_reader": True, "format": "fast_basic"},
]:
table = BytesIO()
table.write(b"a b")
t = ascii.read(table, **kwargs)
assert t.colnames == ["a", "b"]
assert len(t) == 0
def test_path_object():
fpath = pathlib.Path("data/simple.txt")
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ["test 1a", "test2", "test3", "test4"]
assert data["test2"][1] == "hat2"
def test_column_conversion_error():
"""
Test that context information (upstream exception message) from column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format="ipac")
assert "Column col0 failed to convert:" in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(["a b", "1 2"], guess=False, format="basic", converters={"a": []})
assert "no converters" in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == "Darwin":
locale.setlocale(locale.LC_ALL, "fr_FR")
else:
locale.setlocale(locale.LC_ALL, "fr_FR.utf8")
for fast_reader in (
True,
False,
{"use_fast_converter": False},
{"use_fast_converter": True},
):
t = ascii.read(
["a b", "1.5 2"], format="basic", guess=False, fast_reader=fast_reader
)
assert t["a"].dtype.kind == "f"
except locale.Error as e:
pytest.skip(f"Locale error: {e}")
finally:
locale.setlocale(locale.LC_ALL, current)
def test_no_units_for_char_columns():
"""Test that a char column of a Table is assigned no unit and not
a dimensionless unit."""
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {"--": "0"}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert (
reader.read(
"""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """
)["a"][0]
is np.ma.masked
)
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format="latex")
assert dat.colnames == ["a", "b", "c"]
assert np.all(dat["a"] == ["1", r"3\%"])
assert np.all(dat["c"] == ["c", "e"])
def test_aastex_no_trailing_backslash():
    """
    Test that an aastex file with no trailing backslash can be read.
    """
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format="aastex")
assert dat.colnames == ["a", "b", "c"]
assert np.all(dat["a"] == ["1", r"3\%"])
assert np.all(dat["c"] == ["c", "e"])
@pytest.mark.parametrize("encoding", ["utf8", "latin1", "cp1252"])
def test_read_with_encoding(tmp_path, encoding):
data = {"commented_header": "# à b è \n 1 2 héllo", "csv": "à,b,è\n1,2,héllo"}
testfile = tmp_path / "test.txt"
for fmt, content in data.items():
with open(testfile, "w", encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [" à b è ", "--- --- -----", " 1 2 héllo"]
for guess in (True, False):
table = ascii.read(
testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess
)
assert table["è"].dtype.kind == "U"
assert table.pformat() == [
" à b è ",
"--- --- -----",
" 1 2 héllo",
]
def test_unsupported_read_with_encoding():
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read(
"data/simple3.txt",
guess=False,
fast_reader="force",
encoding="latin1",
format="fast_csv",
)
def test_read_chunks_input_types():
"""
Test chunked reading for different input types: file path, file object,
and string input.
"""
fpath = "data/test5.dat"
t1 = ascii.read(
fpath,
header_start=1,
data_start=3,
)
with open(fpath) as fd1, open(fpath) as fd2:
for fp in (fpath, fd1, fd2.read()):
t_gen = ascii.read(
fp,
header_start=1,
data_start=3,
guess=False,
format="fast_basic",
fast_reader={"chunk_size": 400, "chunk_generator": True},
)
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) == 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
with open(fpath) as fd1, open(fpath) as fd2:
for fp in (fpath, fd1, fd2.read()):
# Now read the full table in chunks
t3 = ascii.read(
fp, header_start=1, data_start=3, fast_reader={"chunk_size": 300}
)
assert np.all(t1 == t3)
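# Editor's note: an illustrative sketch (not part of the original suite) of using
# the chunk generator incrementally, e.g. to accumulate per-chunk results without
# stacking the chunks into one table.
def test_read_chunks_incremental_example():
    t_gen = ascii.read(
        "data/test5.dat",
        header_start=1,
        data_start=3,
        guess=False,
        format="fast_basic",
        fast_reader={"chunk_size": 400, "chunk_generator": True},
    )
    nrows = sum(len(t) for t in t_gen)
    t1 = ascii.read("data/test5.dat", header_start=1, data_start=3)
    assert nrows == len(t1)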
@pytest.mark.parametrize("masked", [True, False])
def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds="fS", masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, f"col{i + 1}")
    # TODO: commented_header does not currently work due to the special-cased
# implementation of header parsing.
for format in "tab", "csv", "no_header", "rdb", "basic":
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(
out.getvalue(),
format=format,
fast_reader={"chunk_size": 400, "chunk_generator": True},
)
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={"chunk_size": 400})
assert np.all(t1 == t3)
def test_read_chunks_chunk_size_too_small():
fpath = "data/test5.dat"
with pytest.raises(ValueError) as err:
ascii.read(fpath, header_start=1, data_start=3, fast_reader={"chunk_size": 10})
assert "no newline found in chunk (chunk_size too small?)" in str(err.value)
def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ["a b c"] + ["1.12334 xyz a"] * 50 + ["abcdefg 555 abc"] * 50
table = "\n".join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={"chunk_size": 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2)
def test_read_non_ascii():
"""Test that pure-Python reader is used in case the file contains non-ASCII characters
in it.
"""
table = Table.read(["col1, col2", "\u2119, \u01b4", "1, 2"], format="csv")
assert np.all(table["col1"] == ["\u2119", "1"])
assert np.all(table["col2"] == ["\u01b4", "2"])
@pytest.mark.parametrize("enable", [True, False, "force"])
def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence."""
# Fails for enable=(True, 'force') - #5578
ascii.read("a\tb\n 1\t2\n3\t 4.0", fast_reader={"enable": enable})
assert get_read_trace()[-1]["kwargs"]["Reader"] is (
ascii.Tab if (enable is False) else ascii.FastTab
)
for k in get_read_trace():
if not k.get("status", "Disabled").startswith("Disabled"):
assert k.get("kwargs").get("fast_reader").get("enable") is enable
def _get_lines(rdb):
lines = ["a a_2 a_1 a a"]
if rdb:
lines += ["N N N N N"]
lines += ["1 2 3 4 5", "10 20 30 40 50"]
if rdb:
lines = ["\t".join(line.split()) for line in lines]
return lines
@pytest.mark.parametrize("rdb", [False, True])
@pytest.mark.parametrize("fast_reader", [False, "force"])
def test_deduplicate_names_basic(rdb, fast_reader):
"""Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below.
"""
lines = _get_lines(rdb)
dat = ascii.read(lines, fast_reader=fast_reader)
assert dat.colnames == ["a", "a_2", "a_1", "a_3", "a_4"]
assert len(dat) == 2
dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat.colnames == ["a", "a_2", "a_3"]
assert np.all(dat["a"] == [1, 10])
assert np.all(dat["a_2"] == [2, 20])
assert np.all(dat["a_3"] == [4, 40])
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
include_names=["b1", "b2", "a_4", "b4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert np.all(dat["b1"] == [1, 10])
assert np.all(dat["b2"] == [2, 20])
assert np.all(dat["b4"] == [4, 40])
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
exclude_names=["b3", "b5", "a_3", "a_4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert np.all(dat["b1"] == [1, 10])
assert np.all(dat["b2"] == [2, 20])
assert np.all(dat["b4"] == [4, 40])
def test_include_names_rdb_fast():
"""Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939.
"""
lines = _get_lines(True)
lines[0] = "a\ta_2\ta_1\ta_3\ta_4"
dat = ascii.read(lines, fast_reader="force", include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat["a"].dtype == int
assert dat["a_2"].dtype == int
@pytest.mark.parametrize("fast_reader", [False, "force"])
def test_deduplicate_names_with_types(fast_reader):
"""Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved.
"""
lines = _get_lines(True)
lines[1] = "N\tN\tN\tS\tS"
dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"])
assert len(dat) == 2
assert dat["a_2"].dtype.kind == "i"
assert dat["a_3"].dtype.kind == "U"
dat = ascii.read(
lines,
fast_reader=fast_reader,
names=["b1", "b2", "b3", "b4", "b5"],
include_names=["a1", "a_2", "b1", "b2", "b4"],
)
assert len(dat) == 2
assert dat.colnames == ["b1", "b2", "b4"]
assert dat["b2"].dtype.kind == "i"
assert dat["b4"].dtype.kind == "U"
@pytest.mark.parametrize("rdb", [False, True])
@pytest.mark.parametrize("fast_reader", [False, "force"])
def test_set_invalid_names(rdb, fast_reader):
"""
Test exceptions for invalid (duplicate or `None`) names specified via argument.
"""
lines = _get_lines(rdb)
if rdb:
fmt = "rdb"
else:
fmt = "basic"
with pytest.raises(ValueError) as err:
ascii.read(
lines,
fast_reader=fast_reader,
format=fmt,
guess=rdb,
names=["b1", "b2", "b1", "b4", "b5"],
)
assert "Duplicate column names" in str(err.value)
with pytest.raises(TypeError) as err:
ascii.read(
lines,
fast_reader=fast_reader,
format=fmt,
guess=rdb,
names=["b1", "b2", "b1", None, None],
)
assert "Cannot have None for column name" in str(err.value)
def test_read_masked_bool():
txt = """\
col0 col1
1 1
0 2
True 3
"" 4
False 5
"""
# Reading without converters returns col0 as a string
dat = ascii.read(txt, format="basic")
col = dat["col0"]
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == "U"
assert col[0] == "1"
# Force col0 to be read as bool
converters = {"col0": [convert_numpy(bool)]}
dat = ascii.read(txt, format="basic", converters=converters)
col = dat["col0"]
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == "b"
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
def test_read_converters_wildcard():
"""Test converters where the column name is specified with
a wildcard.
"""
converters = {"F*": [ascii.convert_numpy(np.float32)]}
t = ascii.read(["Fabc Iabc", "1 2"], converters=converters)
assert np.issubdtype(t["Fabc"].dtype, np.float32)
assert not np.issubdtype(t["Iabc"].dtype, np.float32)
def test_read_converters_simplified():
"""Test providing io.ascii read converters as type or dtypes instead of
convert_numpy(type) outputs"""
t = Table()
t["a"] = [1, 2]
t["b"] = [3.5, 4]
t["c"] = ["True", "False"]
t["d"] = ["true", "false"] # Looks kindof like boolean but actually a string
t["e"] = [5, 6]
out = StringIO()
t.write(out, format="ascii.basic")
converters = {"a": str, "e": np.float32}
t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters)
assert t2.pformat(show_dtype=True) == [
" a b c d e ",
"str1 float64 str5 str5 float32",
"---- ------- ----- ----- -------",
" 1 3.5 True true 5.0",
" 2 4.0 False false 6.0",
]
converters = {"a": float, "*": [np.int64, float, bool, str]}
t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters)
assert t2.pformat_all(show_dtype=True) == [
" a b c d e ",
"float64 float64 bool str5 int64",
"------- ------- ----- ----- -----",
" 1.0 3.5 True true 5",
" 2.0 4.0 False false 6",
]
# Test failures
for converters in (
{"*": [int, 1, bool, str]}, # bad converter type
# Tuple converter where 2nd element is not a subclass of NoType
{"a": [(int, int)]},
# Tuple converter with 3 elements not 2
{"a": [(int, int, int)]},
):
with pytest.raises(ValueError, match="Error: invalid format for converters"):
t2 = Table.read(
out.getvalue(), format="ascii.basic", converters=converters, guess=False
)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import pathlib
from contextlib import nullcontext
from io import StringIO
from itertools import chain
import numpy as np
import pytest
from astropy import table
from astropy import units as u
from astropy.io import ascii
from astropy.table.table_helpers import simple_table
from astropy.utils.compat.optional_deps import HAS_BS4
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .common import setup_function, teardown_function # noqa: F401
test_defs = [
{
"kwargs": {},
"out": """\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
{
"kwargs": {"delimiter": None},
"out": """\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
{
"kwargs": {
"formats": {"XCENTER": "%12.1f", "YCENTER": "{0:.1f}"},
"include_names": ["XCENTER", "YCENTER"],
"strip_whitespace": False,
},
"out": """\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
""",
},
{
"kwargs": {"Writer": ascii.Rdb, "exclude_names": ["CHI"]},
"out": """\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
""",
},
{
"kwargs": {"Writer": ascii.Tab},
"out": """\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
""",
},
{
"kwargs": {"Writer": ascii.Csv},
"out": """\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
""",
},
{
"kwargs": {"Writer": ascii.NoHeader},
"out": """\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
{
"kwargs": {"Writer": ascii.CommentedHeader},
"out": """\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
{
"kwargs": {"Writer": ascii.CommentedHeader, "comment": "&"},
"out": """\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
{
"kwargs": {"Writer": ascii.Latex},
"out": """\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
""",
},
{
"kwargs": {"Writer": ascii.AASTex},
"out": """\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
""",
},
{
"kwargs": {
"Writer": ascii.AASTex,
"caption": "Mag values \\label{tab1}",
"latexdict": {
"units": {"MAG": "[mag]", "XCENTER": "[pixel]"},
"tabletype": "deluxetable*",
"tablealign": "htpb",
},
},
"out": """\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
""",
},
{
"kwargs": {
"Writer": ascii.Latex,
"caption": "Mag values \\label{tab1}",
"latexdict": {
"preamble": "\\begin{center}",
"tablefoot": "\\end{center}",
"data_end": ["\\hline", "\\hline"],
"units": {"MAG": "[mag]", "XCENTER": "[pixel]"},
"tabletype": "table*",
"tablealign": "h",
},
"col_align": "|lcccccccccc|",
},
"out": """\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
""",
},
{
"kwargs": {"Writer": ascii.Latex, "latexdict": ascii.latexdicts["template"]},
"out": """\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
""",
},
{
"kwargs": {"Writer": ascii.Latex, "latexdict": {"tabletype": None}},
"out": """\
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
""",
},
{
"kwargs": {
"Writer": ascii.HTML,
"htmldict": {"css": "table,th,td{border:1px solid black;"},
},
"out": """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
""",
},
{
"kwargs": {"Writer": ascii.Ipac},
"out": """\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""",
},
]
test_defs_no_data = [
{
"kwargs": {"Writer": ascii.Ipac},
"out": """\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
""",
},
]
tab_to_fill = ["a b c", "1 2 3", "1 1 3"]
test_defs_fill_value = [
{
"kwargs": {},
"out": """\
a b c
1 2 3
1 1 3
""",
},
{
"kwargs": {"fill_values": ("1", "w")},
"out": """\
a b c
w 2 3
w w 3
""",
},
{
"kwargs": {"fill_values": ("1", "w", "b")},
"out": """\
a b c
1 2 3
1 w 3
""",
},
{
"kwargs": {"fill_values": ("1", "w"), "fill_include_names": ["b"]},
"out": """\
a b c
1 2 3
1 w 3
""",
},
{
"kwargs": {"fill_values": ("1", "w"), "fill_exclude_names": ["a"]},
"out": """\
a b c
1 2 3
1 w 3
""",
},
{
"kwargs": {
"fill_values": ("1", "w"),
"fill_include_names": ["a"],
"fill_exclude_names": ["a", "b"],
},
"out": """\
a b c
1 2 3
1 1 3
""",
},
{
"kwargs": {"fill_values": [("1", "w")], "formats": {"a": "%4.2f"}},
"out": """\
a b c
1.00 2 3
1.00 w 3
""",
},
]
test_def_masked_fill_value = [
{
"kwargs": {},
"out": """\
a b c
"" 2 3
1 1 ""
""",
},
{
"kwargs": {"fill_values": [("1", "w"), (ascii.masked, "X")]},
"out": """\
a b c
X 2 3
w w X
""",
},
{
"kwargs": {
"fill_values": [("1", "w"), (ascii.masked, "XXX")],
"formats": {"a": "%4.1f"},
},
"out": """\
a b c
XXX 2 3
1.0 w XXX
""",
},
{
"kwargs": {"Writer": ascii.Csv},
"out": """\
a,b,c
,2,3
1,1,
""",
},
]
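# Editor's note: an illustrative sketch (not part of the original suite) of the
# write-side ``fill_values`` behaviour exercised by ``test_def_masked_fill_value``
# above -- the ``ascii.masked`` sentinel substitutes masked entries on output.
def test_masked_fill_value_inline_example():
    t = table.Table([[1, 2]], names=["a"], masked=True)
    t["a"].mask = [True, False]
    out = StringIO()
    ascii.write(t, out, fill_values=[(ascii.masked, "X")])
    assert out.getvalue().splitlines() == ["a", "X", "2"]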
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def check_write_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def["kwargs"])
except ValueError as e: # if format doesn't have a fast writer, ignore
if "not in the list of formats with fast writers" not in str(e.value):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith("~"):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f"Actual:\n{actual}")
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def["out"].strip().splitlines()
]
def check_write_table_via_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
test_def = copy.deepcopy(test_def)
if "Writer" in test_def["kwargs"]:
format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
del test_def["kwargs"]["Writer"]
else:
format = "ascii"
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def["kwargs"])
except ValueError as e: # if format doesn't have a fast writer, ignore
if "not in the list of formats with fast writers" not in str(e.value):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith("~"):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f"Actual:\n{actual}")
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def["out"].strip().splitlines()
]
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize(
"path_format", ["buffer", "plain", "tilde-str", "tilde-pathlib"]
)
def test_write_table(fast_writer, tmp_path, home_is_tmpdir, path_format):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read("data/daophot.dat")
if path_format == "buffer":
out_name = None
elif path_format == "plain":
out_name = tmp_path / "table"
elif path_format == "tilde-str":
out_name = os.path.join("~", "table")
else:
out_name = pathlib.Path("~", "table")
for test_def in test_defs:
check_write_table(test_def, data, fast_writer, out=out_name)
check_write_table_via_table(test_def, data, fast_writer, out=out_name)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
"""see discussion in #2255"""
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["blah"] = "extra"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*were not written.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = "invalid"
out = StringIO()
with pytest.warns(AstropyWarning, match=r".*has been skipped.*") as warn:
data.write(out, format="ascii.ipac")
assert len(warn) == 1
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read("data/no_data_ipac.dat")
data.meta["keywords"]["blah"] = {"value": "invalid"}
out = StringIO()
data.write(out, format="ascii.ipac")
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1,2,3")
out = StringIO()
ascii.write(data, out, format="basic", fast_writer=fast_writer)
expected = ["# c1", "# c2", "# c3", "a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format="commented_header", fast_writer=fast_writer)
expected = ["# a b c", "# c1", "# c2", "# c3", "1 2 3"]
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format="basic", comment=False, fast_writer=fast_writer)
expected = ["a b c", "1 2 3"]
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ["%0.1f", ".1f", "0.1f", "{0:0.1f}"])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read("#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33")
out = StringIO()
expected = ["# c1", "# c2", "# c3", "a b c", "1.1 2.22 3.33"]
data["a"].format = fmt
ascii.write(data, out, format="basic", fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(" A", "B ", " C "))
out = StringIO()
ascii.write(data, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == "A,B,C"
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table(
[
table.Column(name="date", data=["a", "b"]),
table.Column(name="NUV exp.time", data=[1, 2]),
]
)
latexdict = copy.deepcopy(ascii.latexdicts["AA"])
latexdict["units"] = {"NUV exp.time": "s"}
out = StringIO()
expected = """\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
""".replace(
"\n", os.linesep
)
ascii.write(t, out, format="aastex", latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t["NUV exp.time"].unit = u.s
t["date"].unit = u.yr
out = StringIO()
ascii.write(t, out, format="aastex", latexdict=ascii.latexdicts["AA"])
assert out.getvalue() == expected.replace(
"colhead{s}", r"colhead{$\mathrm{s}$}"
).replace("colhead{ }", r"colhead{$\mathrm{yr}$}")
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
    Test the fix for #3562, where using comment=False with the commented_header
    writer raised a confusing exception.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(
t, out, format="commented_header", comment=False, fast_writer=fast_writer
)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([["Hello", "World"]], dtype=["S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0", "Hello", "World"]
@pytest.mark.parametrize(
"names, include_names, exclude_names, formats, issues_warning",
[
(["x", "y"], ["x", "y"], ["x"], {"x": "%d", "y": "%f"}, True),
(["x", "y"], ["x", "y"], ["y"], {"x": "%d"}, False),
(["x", "y"], ["x", "y"], [], {"p": "%d", "q": "%f"}, True),
(["x", "y"], ["x", "y"], [], {"z": "%f"}, True),
(["x", "y"], ["x", "y"], [], {"x": "%d"}, False),
(["x", "y"], ["x", "y"], [], {"p": "%d", "y": "%f"}, True),
(["x", "y"], ["x", "y"], [], {}, False),
],
)
def test_names_with_formats(
names, include_names, exclude_names, formats, issues_warning
):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(
t,
out,
names=names,
include_names=include_names,
exclude_names=exclude_names,
formats=formats,
)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize(
"formats, issues_warning",
[
({"p": "%d", "y": "%f"}, True),
({"x": "%d", "y": "%f"}, True),
({"z": "%f"}, True),
({}, False),
],
)
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
    Test that empty string fields are written as quoted empty strings ("") with
    the default space delimiter, and as plain empty fields with a comma delimiter.
"""
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0 col1", 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=",")
assert out.getvalue().splitlines() == ["col0,col1", "Hello,", ","]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=["S2"])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["col0"]
@pytest.mark.parametrize(
"format", ["ascii", "csv", "html", "latex", "ascii.fixed_width", "html"]
)
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"])
def test_write_overwrite_ascii(
format, fast_writer, tmp_path, home_is_tmpdir, path_format
):
"""Test overwrite argument for various ASCII writers"""
true_filename = tmp_path / "table-tmp.dat"
if path_format == "plain":
filename = true_filename
elif path_format == "tilde-str":
filename = os.path.join("~", "table-tmp.dat")
else:
filename = pathlib.Path("~", "table-tmp.dat")
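    # The home_is_tmpdir fixture points the home directory at tmp_path, so the
    # tilde paths above should resolve to the same location as true_filename.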
with open(true_filename, "w"):
# create empty file
pass
t = table.Table([["Hello", ""], ["", ""]], dtype=["S10", "S10"])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format, fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(true_filename, "w") as fp:
t.write(fp, overwrite=False, format=format, fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format, fast_writer=fast_writer)
if "tilde" in path_format:
# Ensure no files have been accidentally written to a literal tilde path
assert not os.path.exists(filename)
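# All registered ASCII formats paired with their reader/writer classes:
# fast (C-based) implementations first, then the pure-Python ones.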
fmt_name_classes = list(
chain(ascii.core.FAST_CLASSES.items(), ascii.core.FORMAT_CLASSES.items())
)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip tests for fixed_width or HTML without bs4
if (fmt_name == "html" and not HAS_BS4) or fmt_name == "fixed_width":
return
if "qdp" in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=["f", "i"])
else:
t = simple_table(masked=True)
out = StringIO()
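    # Only use the fast (C) writer/reader for formats that actually provide one.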
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {"names": t.colnames} if "no_header" in fmt_name else {}
if "qdp" in fmt_name:
kwargs.update({"table_id": 0, "names": t.colnames})
t2 = ascii.read(
out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs
)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_newlines(fast_writer, tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/5126
    # On Windows, when writing to a filename (not e.g. StringIO), newlines were
# \r\r\n instead of \r\n.
filename = tmp_path / "test"
t = table.Table([["a", "b", "c"]], names=["col"])
ascii.write(t, filename, fast_writer=fast_writer)
with open(filename, newline="") as f:
content = f.read()
assert content == os.linesep.join(["col", "a", "b", "c"]) + os.linesep
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=["a", "b"])
t.meta["comments"] = ["hello"]
ascii.write(t, out, format="csv", fast_writer=fast_writer)
assert out.getvalue().splitlines() == ["a,b", "1,3", "2,4"]
out = StringIO()
ascii.write(t, out, format="csv", fast_writer=fast_writer, comment="#")
assert out.getvalue().splitlines() == ["#hello", "a,b", "1,3", "2,4"]
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=["a", "b"])
ascii.write(t, out, fast_writer=fast_writer, formats={"a": "%02d", "b": "%.2f"})
assert out.getvalue().splitlines() == ["a b", "01 1.00", "02 2.00"]
def test_validate_write_kwargs():
out = StringIO()
t = table.QTable([[1, 2], [1, 2]], names=["a", "b"])
with pytest.raises(
TypeError,
match=r"write\(\) argument 'fast_writer' must be a "
r"\(<class 'bool'>, <class 'str'>\) object, "
r"got <class 'int'> instead",
):
ascii.write(t, out, fast_writer=12)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, "_io_registry_can_write", True):
return
# Skip tests for ecsv or HTML without bs4. See the comment in latex.py
# Latex class where max_ndim = None is defined regarding latex and aastex.
if (fmt_name == "html" and not HAS_BS4) or fmt_name in ("ecsv", "latex", "aastex"):
return
out = StringIO()
t = table.Table()
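    # A column whose cells are themselves multidimensional arrays cannot be
    # represented in these plain ASCII formats and must raise.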
t["a"] = np.arange(16).reshape(2, 2, 2, 2)
t["b"] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r"column\(s\) with dimension"):
ascii.write(t, out, format=fmt_name, fast_writer=fast)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data["a"].mask = [True, False]
data["c"].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test :mod:`astropy.io.registry`.
.. todo::
Don't rely on Table for tests
"""
import os
from collections import Counter
from copy import deepcopy
from io import StringIO
import numpy as np
import pytest
import astropy.units as u
from astropy.io import registry as io_registry
from astropy.io.registry import (
IORegistryError,
UnifiedInputRegistry,
UnifiedIORegistry,
UnifiedOutputRegistry,
compat,
)
from astropy.io.registry.base import _UnifiedIORegistryBase
from astropy.io.registry.compat import default_registry
from astropy.table import Table
###############################################################################
# pytest setup and fixtures
class UnifiedIORegistryBaseSubClass(_UnifiedIORegistryBase):
"""Non-abstract subclass of UnifiedIORegistryBase for testing."""
def get_formats(self, data_class=None):
return None
class EmptyData:
"""
Thing that can read and write.
    Note that ``read``/``write`` here are the compatibility methods, which
    accept the kwarg ``registry``. This lets us avoid subclassing ``EmptyData``
    for each type of registry (read-only, ...) and use this class everywhere.
"""
read = classmethod(io_registry.read)
write = io_registry.write
class OtherEmptyData:
"""A different class with different I/O"""
read = classmethod(io_registry.read)
write = io_registry.write
def empty_reader(*args, **kwargs):
return EmptyData()
def empty_writer(table, *args, **kwargs):
return "status: success"
def empty_identifier(*args, **kwargs):
return True
@pytest.fixture
def fmtcls1():
return ("test1", EmptyData)
@pytest.fixture
def fmtcls2():
return ("test2", EmptyData)
@pytest.fixture(params=["test1", "test2"])
def fmtcls(request):
yield (request.param, EmptyData)
@pytest.fixture
def original():
ORIGINAL = {}
ORIGINAL["readers"] = deepcopy(default_registry._readers)
ORIGINAL["writers"] = deepcopy(default_registry._writers)
ORIGINAL["identifiers"] = deepcopy(default_registry._identifiers)
return ORIGINAL
###############################################################################
def test_fmcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1]
def test_IORegistryError():
with pytest.raises(IORegistryError, match="just checking"):
raise IORegistryError("just checking")
class TestUnifiedIORegistryBase:
"""Test :class:`astropy.io.registry.UnifiedIORegistryBase`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistryBaseSubClass
@pytest.fixture
def registry(self):
"""I/O registry. Cleaned before and after each function."""
registry = self._cls()
HAS_READERS = hasattr(registry, "_readers")
HAS_WRITERS = hasattr(registry, "_writers")
# copy and clear original registry
ORIGINAL = {}
ORIGINAL["identifiers"] = deepcopy(registry._identifiers)
registry._identifiers.clear()
if HAS_READERS:
ORIGINAL["readers"] = deepcopy(registry._readers)
registry._readers.clear()
if HAS_WRITERS:
ORIGINAL["writers"] = deepcopy(registry._writers)
registry._writers.clear()
yield registry
registry._identifiers.clear()
registry._identifiers.update(ORIGINAL["identifiers"])
if HAS_READERS:
registry._readers.clear()
registry._readers.update(ORIGINAL["readers"])
if HAS_WRITERS:
registry._writers.clear()
registry._writers.update(ORIGINAL["writers"])
# ===========================================
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
# defaults
assert registry.get_formats() is None
# (kw)args don't matter
assert registry.get_formats(data_class=24) is None
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
# TODO! figure out what can be tested
with registry.delay_doc_updates(EmptyData):
registry.register_identifier(*fmtcls1, empty_identifier)
def test_register_identifier(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_identifier()``."""
# initial check it's not registered
assert fmtcls1 not in registry._identifiers
assert fmtcls2 not in registry._identifiers
# register
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls2, empty_identifier)
assert fmtcls1 in registry._identifiers
assert fmtcls2 in registry._identifiers
def test_register_identifier_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_identifier()`` twice."""
fmt, cls = fmtcls
registry.register_identifier(fmt, cls, empty_identifier)
with pytest.raises(IORegistryError) as exc:
registry.register_identifier(fmt, cls, empty_identifier)
assert (
str(exc.value) == f"Identifier for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_identifier_force(self, registry, fmtcls1):
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls1, empty_identifier, force=True)
assert fmtcls1 in registry._identifiers
# -----------------------
def test_unregister_identifier(self, registry, fmtcls1):
"""Test ``registry.unregister_identifier()``."""
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
registry.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_unregister_identifier_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_identifier()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_identifier(fmt, cls)
assert (
str(exc.value)
== f"No identifier defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_identify_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
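        # Arguments: (origin, data_class_required, path, fileobj, args, kwargs).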
args = (None, cls, None, None, (None,), {})
# test no formats to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# ===========================================
# Compat tests
def test_compat_register_identifier(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._identifiers
compat.register_identifier(*fmtcls1, empty_identifier, registry=registry)
assert fmtcls1 in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
try:
compat.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._identifiers
finally:
default_registry._identifiers.pop(fmtcls1)
def test_compat_unregister_identifier(self, registry, fmtcls1):
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
compat.unregister_identifier(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
default_registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in default_registry._identifiers
compat.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_compat_identify_format(self, registry, fmtcls1):
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), {})
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
formats = compat.identify_format(*args, registry=registry)
assert fmt in formats
# without registry specified it becomes default_registry
if registry is not default_registry:
try:
default_registry.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
formats = compat.identify_format(*args)
assert fmt in formats
finally:
default_registry.unregister_identifier(*fmtcls1)
@pytest.mark.skip("TODO!")
def test_compat_get_formats(self, registry, fmtcls1):
assert False
@pytest.mark.skip("TODO!")
def test_compat_delay_doc_updates(self, registry, fmtcls1):
assert False
class TestUnifiedInputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedInputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedInputRegistry
# ===========================================
def test_inherited_read_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _read():
return EmptyData()
def _read1():
return Child1()
# check that reader gets inherited
registry.register_reader("test", EmptyData, _read)
assert registry.get_reader("test", Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
registry.register_reader("test", Child1, _read1)
assert registry.get_reader("test", Child2) is _read1
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
with registry.delay_doc_updates(EmptyData):
registry.register_reader("test", EmptyData, empty_reader)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iread = docs[ihd].index("Read") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert docs[-1][ifmt : ifmt + 5] == "test"
assert docs[-1][iread : iread + 3] != "Yes"
# now test it's updated
docs = EmptyData.read.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 2
iread = docs[ihd].index("Read") + 1
assert docs[-2][ifmt : ifmt + 4] == "test"
assert docs[-2][iread : iread + 3] == "Yes"
def test_identify_read_format(self, registry):
"""Test ``registry.identify_format()``."""
args = ("read", EmptyData, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # it doesn't matter whether a reader is registered; the identifier accepts everything
registry.register_identifier("test", EmptyData, empty_identifier)
formats = registry.identify_format(*args)
assert "test" in formats
# -----------------------
def test_register_reader(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_reader()``."""
# initial check it's not registered
assert fmtcls1 not in registry._readers
assert fmtcls2 not in registry._readers
# register
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls2, empty_reader)
assert fmtcls1 in registry._readers
assert fmtcls2 in registry._readers
assert registry._readers[fmtcls1] == (empty_reader, 0) # (f, priority)
assert registry._readers[fmtcls2] == (empty_reader, 0) # (f, priority)
def test_register_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
registry.register_reader(fmt, cls, empty_reader)
with pytest.raises(IORegistryError) as exc:
registry.register_reader(fmt, cls, empty_reader)
assert (
str(exc.value) == f"Reader for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_reader_force(self, registry, fmtcls1):
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls1, empty_reader, force=True)
assert fmtcls1 in registry._readers
def test_register_readers_with_same_name_on_different_classes(self, registry):
# No errors should be generated if the same name is registered for
        # different objects... but this failed under Python 3
registry.register_reader("test", EmptyData, lambda: EmptyData())
registry.register_reader("test", OtherEmptyData, lambda: OtherEmptyData())
t = EmptyData.read(format="test", registry=registry)
assert isinstance(t, EmptyData)
tbl = OtherEmptyData.read(format="test", registry=registry)
assert isinstance(tbl, OtherEmptyData)
# -----------------------
def test_unregister_reader(self, registry, fmtcls1):
"""Test ``registry.unregister_reader()``."""
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
registry.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_unregister_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.unregister_reader(*fmtcls1)
assert (
str(exc.value)
== f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_reader(self, registry, fmtcls):
"""Test ``registry.get_reader()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError):
registry.get_reader(fmt, cls)
registry.register_reader(fmt, cls, empty_reader)
reader = registry.get_reader(fmt, cls)
assert reader is empty_reader
def test_get_reader_invalid(self, registry, fmtcls):
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.get_reader(fmt, cls)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_read_noformat(self, registry, fmtcls1):
"""Test ``registry.read()`` when there isn't a reader."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._readers.update(original["readers"])
testfile = tmp_path / "foo.example"
with open(testfile, "w") as f:
f.write("Hello world")
with pytest.raises(IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_toomanyformats(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
cls.read(registry=registry)
assert str(exc.value) == f"Format is ambiguous - options are: {fmt1}, {fmt2}"
def test_read_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
counter = Counter()
def counting_reader1(*args, **kwargs):
counter[fmt1] += 1
return cls()
def counting_reader2(*args, **kwargs):
counter[fmt2] += 1
return cls()
registry.register_reader(fmt1, cls, counting_reader1, priority=1)
registry.register_reader(fmt2, cls, counting_reader2, priority=2)
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
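        # Both identifiers claim the input, so the higher-priority reader (fmt2)
        # must be the one that actually runs.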
cls.read(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_read_format_noreader(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_read_identifier(self, tmp_path, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(
fmt1, cls, lambda o, path, fileobj, *x, **y: path.endswith("a")
)
registry.register_identifier(
fmt2, cls, lambda o, path, fileobj, *x, **y: path.endswith("b")
)
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmp_path / "testfile.a"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt1}' and class '{cls.__name__}'"
)
filename = tmp_path / "testfile.b"
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_read_valid_return(self, registry, fmtcls):
fmt, cls = fmtcls
registry.register_reader(fmt, cls, empty_reader)
t = cls.read(format=fmt, registry=registry)
assert isinstance(t, cls)
def test_read_non_existing_unknown_ext(self, fmtcls1):
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
data = fmtcls1[1].read("non-existing-file-with-unknown.ext")
def test_read_directory(self, tmp_path, registry, fmtcls1):
"""
Regression test for a bug that caused the I/O registry infrastructure to
not work correctly for datasets that are represented by folders as
opposed to files, when using the descriptors to add read/write methods.
"""
_, cls = fmtcls1
registry.register_identifier(
"test_folder_format", cls, lambda o, *x, **y: o == "read"
)
registry.register_reader("test_folder_format", cls, empty_reader)
filename = tmp_path / "folder_dataset"
filename.mkdir()
# With the format explicitly specified
dataset = cls.read(filename, format="test_folder_format", registry=registry)
assert isinstance(dataset, cls)
# With the auto-format identification
dataset = cls.read(filename, registry=registry)
assert isinstance(dataset, cls)
# ===========================================
# Compat tests
def test_compat_register_reader(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._readers
compat.register_reader(*fmtcls1, empty_reader, registry=registry)
assert fmtcls1 in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
try:
compat.register_reader(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._readers
finally:
default_registry._readers.pop(fmtcls1)
def test_compat_unregister_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
compat.unregister_reader(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
default_registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in default_registry._readers
compat.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_compat_get_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1, registry=registry)
assert reader is empty_reader
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1)
assert reader is empty_reader
default_registry.unregister_reader(*fmtcls1)
def test_compat_read(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt, registry=registry)
assert isinstance(t, cls)
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt)
assert isinstance(t, cls)
default_registry.unregister_reader(*fmtcls1)
class TestUnifiedOutputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedOutputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedOutputRegistry
# ===========================================
def test_inherited_write_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _write():
return EmptyData()
def _write1():
return Child1()
# check that writer gets inherited
registry.register_writer("test", EmptyData, _write)
assert registry.get_writer("test", Child2) is _write
# check that nearest ancestor is identified
# (i.e. that the writer for Child2 is the registered method
# for Child1, and not Table)
registry.register_writer("test", Child1, _write1)
assert registry.get_writer("test", Child2) is _write1
# ===========================================
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
fmt, cls = fmtcls1
with registry.delay_doc_updates(EmptyData):
registry.register_writer(*fmtcls1, empty_writer)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format")
iwrite = docs[ihd].index("Write") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert fmt in docs[-1][ifmt : ifmt + len(fmt) + 1]
assert docs[-1][iwrite : iwrite + 3] != "Yes"
# now test it's updated
docs = EmptyData.write.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 1
iwrite = docs[ihd].index("Write") + 2
assert fmt in docs[-2][ifmt : ifmt + len(fmt) + 1]
assert docs[-2][iwrite : iwrite + 3] == "Yes"
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_identify_write_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = ("write", cls, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # it doesn't matter whether a writer is registered; the identifier accepts everything
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# -----------------------
def test_register_writer(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_writer()``."""
# initial check it's not registered
assert fmtcls1 not in registry._writers
assert fmtcls2 not in registry._writers
# register
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls2, empty_writer)
assert fmtcls1 in registry._writers
assert fmtcls2 in registry._writers
def test_register_writer_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_writer()`` twice."""
fmt, cls = fmtcls
registry.register_writer(fmt, cls, empty_writer)
with pytest.raises(IORegistryError) as exc:
registry.register_writer(fmt, cls, empty_writer)
assert (
str(exc.value) == f"Writer for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_writer_force(self, registry, fmtcls1):
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls1, empty_writer, force=True)
assert fmtcls1 in registry._writers
# -----------------------
def test_unregister_writer(self, registry, fmtcls1):
"""Test ``registry.unregister_writer()``."""
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in registry._writers
def test_unregister_writer_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_writer()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_writer(fmt, cls)
assert (
str(exc.value)
== f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_writer(self, registry, fmtcls1):
"""Test ``registry.get_writer()``."""
with pytest.raises(IORegistryError):
registry.get_writer(*fmtcls1)
registry.register_writer(*fmtcls1, empty_writer)
writer = registry.get_writer(*fmtcls1)
assert writer is empty_writer
def test_get_writer_invalid(self, registry, fmtcls1):
"""Test invalid ``registry.get_writer()``."""
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.get_writer(fmt, cls)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_write_noformat(self, registry, fmtcls1):
"""Test ``registry.write()`` when there isn't a writer."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._writers.update(original["writers"])
testfile = tmp_path / "foo.example"
with pytest.raises(IORegistryError) as exc:
Table().write(testfile, registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_toomanyformats(self, registry, fmtcls1, fmtcls2):
registry.register_identifier(*fmtcls1, lambda o, *x, **y: True)
registry.register_identifier(*fmtcls2, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert (
str(exc.value)
== f"Format is ambiguous - options are: {fmtcls1[0]}, {fmtcls2[0]}"
)
def test_write_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls1 = fmtcls1
fmt2, cls2 = fmtcls2
counter = Counter()
def counting_writer1(*args, **kwargs):
counter[fmt1] += 1
def counting_writer2(*args, **kwargs):
counter[fmt2] += 1
registry.register_writer(fmt1, cls1, counting_writer1, priority=1)
registry.register_writer(fmt2, cls2, counting_writer2, priority=2)
registry.register_identifier(fmt1, cls1, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls2, lambda o, *x, **y: True)
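        # Both identifiers claim the output, so only the higher-priority
        # writer (fmt2) should run.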
cls1().write(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_write_format_nowriter(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_write_identifier(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: x[0].startswith("a"))
registry.register_identifier(fmt2, cls, lambda o, *x, **y: x[0].startswith("b"))
# Now check that we got past the identifier and are trying to get
        # the writer. The registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(IORegistryError) as exc:
cls().write("abc", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write("bac", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_write_return(self, registry, fmtcls1):
"""Most writers will return None, but other values are not forbidden."""
fmt, cls = fmtcls1
registry.register_writer(fmt, cls, empty_writer)
res = cls.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# ===========================================
# Compat tests
def test_compat_register_writer(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._writers
compat.register_writer(*fmtcls1, empty_writer, registry=registry)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
try:
compat.register_writer(*fmtcls1, empty_writer)
except Exception:
pass
else:
assert fmtcls1 in default_registry._writers
finally:
default_registry._writers.pop(fmtcls1)
def test_compat_unregister_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
compat.unregister_writer(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._writers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
compat.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_get_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
writer = compat.get_writer(*fmtcls1, registry=registry)
assert writer is empty_writer
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
writer = compat.get_writer(*fmtcls1)
assert writer is empty_writer
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_write(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
res = compat.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
res = compat.write(cls(), format=fmt)
assert res == "status: success"
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
class TestUnifiedIORegistry(TestUnifiedInputRegistry, TestUnifiedOutputRegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistry
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
# -----------------------
def test_identifier_origin(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: o == "read")
registry.register_identifier(fmt2, cls, lambda o, *x, **y: o == "write")
registry.register_reader(fmt1, cls, empty_reader)
registry.register_writer(fmt2, cls, empty_writer)
# There should not be too many formats defined
cls.read(registry=registry)
cls().write(registry=registry)
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt2, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt1, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
class TestDefaultRegistry(TestUnifiedIORegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = lambda *args: default_registry
# =============================================================================
# Test compat
# much of this is already tested above since EmptyData uses io_registry.X(),
# which are the compat methods.
def test_dir():
"""Test all the compat methods are in the directory"""
dc = dir(compat)
for n in compat.__all__:
assert n in dc
def test_getattr():
for n in compat.__all__:
assert hasattr(compat, n)
with pytest.raises(AttributeError, match="module 'astropy.io.registry.compat'"):
compat.this_is_definitely_not_in_this_module
# =============================================================================
# Table tests
def test_read_basic_table():
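    # Register a throwaway reader on the registry backing Table.read and make
    # sure it is removed again in the finally clause below.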
registry = Table.read._registry
data = np.array(
list(zip([1, 2, 3], ["a", "b", "c"])), dtype=[("A", int), ("B", "|U1")]
)
try:
registry.register_reader("test", Table, lambda x: Table(x))
except Exception:
pass
else:
t = Table.read(data, format="test")
assert t.keys() == ["A", "B"]
for i in range(3):
assert t["A"][i] == data["A"][i]
assert t["B"][i] == data["B"][i]
finally:
registry._readers.pop("test", None)
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
@pytest.fixture(autouse=True)
def registry(self):
"""I/O registry. Not cleaned."""
yield
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ["a b", "1 2"]
mt = MyTable.read(data, format="ascii")
t = Table.read(data, format="ascii")
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=["a", "b"])
mt.write(buffer, format="ascii")
assert buffer.getvalue() == os.linesep.join(["a b", "1 2", ""])
def test_read_table_subclass_with_columns_attributes(self, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/7181"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=["a"])
mt["a"].unit = u.m
mt["a"].format = ".4f"
mt["a"].description = "hello"
testfile = tmp_path / "junk.fits"
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t["a"].unit == u.m
assert t["a"].format == "{:13.4f}"
assert t["a"].description == "hello"
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import gzip
import io
import pathlib
import sys
from unittest import mock
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_array_equal
from astropy.io.votable import tree
from astropy.io.votable.exceptions import W39, VOTableSpecError, VOWarning
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, "float_repr_style"):
legacy_float_repr = sys.float_repr_style == "legacy"
else:
legacy_float_repr = sys.platform.startswith("win")
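# Tests comparing output against the truth files are marked xfail when this
# Python build uses the legacy float repr, since float formatting differs.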
def assert_validate_schema(filename, version):
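    # xmllint-based schema validation is not exercised on Windows.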
if sys.platform.startswith("win"):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, "File did not validate against VOTable schema"
def test_parse_single_table():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table = parse_single_table(get_pkg_data_filename("data/regression.xml"))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table2 = parse_single_table(
get_pkg_data_filename("data/regression.xml"), table_number=1
)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename("data/regression.xml"), table_number=3)
def _test_regression(tmp_path, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename("data/regression.xml"),
_debug_python_based_parser=_python_based,
)
table = votable.get_first_table()
dtypes = [
(("string test", "string_test"), "|O8"),
(("fixed string test", "string_test_2"), "<U10"),
("unicode_test", "|O8"),
(("unicode test", "fixed_unicode_test"), "<U10"),
(("string array test", "string_array_test"), "<U4"),
("unsignedByte", "|u1"),
("short", "<i2"),
("int", "<i4"),
("long", "<i8"),
("double", "<f8"),
("float", "<f4"),
("array", "|O8"),
("bit", "|b1"),
("bitarray", "|b1", (3, 2)),
("bitvararray", "|O8"),
("bitvararray2", "|O8"),
("floatComplex", "<c8"),
("doubleComplex", "<c16"),
("doubleComplexArray", "|O8"),
("doubleComplexArrayFixed", "<c16", (2,)),
("boolean", "|b1"),
("booleanArray", "|b1", (4,)),
("nulls", "<i4"),
("nulls_array", "<i4", (2, 2)),
("precision1", "<f8"),
("precision2", "<f8"),
("doublearray", "|O8"),
("bitarray2", "|b1", (16,)),
]
if sys.byteorder == "big":
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace("<", ">")
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(
str(tmp_path / "regression.tabledata.xml"),
_debug_python_based_parser=_python_based,
)
assert_validate_schema(str(tmp_path / "regression.tabledata.xml"), votable.version)
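    # binary_mode=1 exercises the legacy BINARY serialization (VOTable 1.1);
    # binary_mode=2 exercises BINARY2, introduced in VOTable 1.3.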
if binary_mode == 1:
votable.get_first_table().format = "binary"
votable.version = "1.1"
elif binary_mode == 2:
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
votable.version = "1.3"
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmp_path / "regression.binary.xml"), votable.version)
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = "tabledata"
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
assert_validate_schema(
str(tmp_path / "regression.bin.tabledata.xml"), votable.version
)
with open(
get_pkg_data_filename(
f"data/regression.bin.tabledata.truth.{votable.version}.xml"
),
encoding="utf-8",
) as fd:
truth = fd.readlines()
with open(str(tmp_path / "regression.bin.tabledata.xml"), encoding="utf-8") as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml.gz"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
with gzip.GzipFile(str(tmp_path / "regression.bin.tabledata.xml.gz"), "rb") as gzfd:
output = gzfd.readlines()
output = [x.decode("utf-8").rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail("legacy_float_repr")
def test_regression(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_python_based_parser(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, True)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_binary2(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False, 2)
class TestFixups:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename("data/regression.xml")
).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array["string_test_2"], self.array["fixed string test"])
class TestReferences:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == "boolean"
assert fieldref.get_ref().datatype == "boolean"
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == "INPUT"
assert paramref.get_ref().datatype == "float"
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
columns = ["string_test", "unsignedByte", "bitarray"]
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
def test_select_columns_by_name():
columns = ["string_test", "unsignedByte", "bitarray"]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
class TestParse:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array["string_test"].dtype.type, np.object_)
assert_array_equal(
self.array["string_test"],
["String & test", "String & test", "XXXX", "", ""],
)
def test_fixed_string_test(self):
assert issubclass(self.array["string_test_2"].dtype.type, np.unicode_)
assert_array_equal(
self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""]
)
def test_unicode_test(self):
assert issubclass(self.array["unicode_test"].dtype.type, np.object_)
assert_array_equal(
self.array["unicode_test"],
["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""],
)
def test_fixed_unicode_test(self):
assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.unicode_)
assert_array_equal(
self.array["fixed_unicode_test"],
["Ceçi n'est", "வணக்கம்", "0123456789", "", ""],
)
def test_unsignedByte(self):
assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8)
assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255])
assert not np.any(self.mask["unsignedByte"])
def test_short(self):
assert issubclass(self.array["short"].dtype.type, np.int16)
assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask["short"])
def test_int(self):
assert issubclass(self.array["int"].dtype.type, np.int32)
assert_array_equal(
self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789]
)
assert_array_equal(self.mask["int"], [False, False, False, False, True])
def test_long(self):
assert issubclass(self.array["long"].dtype.type, np.int64)
assert_array_equal(
self.array["long"],
[
922337203685477,
123456789,
-1152921504606846976,
1152921504606846975,
123456789,
],
)
assert_array_equal(self.mask["long"], [False, True, False, False, True])
def test_double(self):
assert issubclass(self.array["double"].dtype.type, np.float64)
assert_array_equal(
self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf]
)
assert_array_equal(self.mask["double"], [False, False, False, True, False])
def test_float(self):
assert issubclass(self.array["float"].dtype.type, np.float32)
assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask["float"], [False, False, False, False, True])
def test_array(self):
assert issubclass(self.array["array"].dtype.type, np.object_)
match = [
[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]],
]
for a, b in zip(self.array["array"], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data["array"][3].mask[0][0]
assert self.array.data["array"][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array["bit"].dtype.type, np.bool_)
assert_array_equal(self.array["bit"], [True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array["bitarray"].dtype.type, np.bool_)
assert self.array["bitarray"].shape == (5, 3, 2)
assert_array_equal(
self.array["bitarray"],
[
[[True, False], [True, True], [False, True]],
[[False, True], [False, False], [True, True]],
[[True, True], [True, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
],
)
def test_bitarray_mask(self):
assert_array_equal(
self.mask["bitarray"],
[
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[True, True], [True, True], [True, True]],
[[True, True], [True, True], [True, True]],
],
)
def test_bitvararray(self):
assert issubclass(self.array["bitvararray"].dtype.type, np.object_)
match = [
[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[],
[],
]
for a, b in zip(self.array["bitvararray"], match):
assert_array_equal(a, b)
match_mask = [
[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False,
False,
]
for a, b in zip(self.array["bitvararray"], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array["bitvararray2"].dtype.type, np.object_)
match = [
[],
[
[[False, True], [False, False], [True, False]],
[[True, False], [True, False], [True, False]],
],
[[[True, True], [True, True], [True, True]]],
[],
[],
]
for a, b in zip(self.array["bitvararray2"], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array["floatComplex"].dtype.type, np.complex64)
assert_array_equal(
self.array["floatComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j],
)
assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128)
assert_array_equal(
self.array["doubleComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j],
)
assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_)
assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0]
def test_boolean(self):
assert issubclass(self.array["boolean"].dtype.type, np.bool_)
assert_array_equal(self.array["boolean"], [True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask["boolean"], [False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array["booleanArray"].dtype.type, np.bool_)
assert_array_equal(
self.array["booleanArray"],
[
[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False],
],
)
def test_boolean_array_mask(self):
assert_array_equal(
self.mask["booleanArray"],
[
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True],
],
)
def test_nulls(self):
assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9])
assert_array_equal(self.mask["nulls"], [False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(
self.array["nulls_array"],
[
[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]],
],
)
assert_array_equal(
self.mask["nulls_array"],
[
[[True, True], [True, True]],
[[False, False], [False, False]],
[[True, False], [True, False]],
[[False, True], [False, True]],
[[True, True], [True, True]],
],
)
def test_double_array(self):
assert issubclass(self.array["doublearray"].dtype.type, np.object_)
assert len(self.array["doublearray"][0]) == 0
assert_array_equal(
self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1]
)
assert_array_equal(
self.array.data["doublearray"][1].mask,
[False, False, False, False, False, False, True],
)
def test_bit_array2(self):
assert_array_equal(
self.array["bitarray2"][0],
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
True,
True,
False,
False,
False,
False,
],
)
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"][0])
assert np.all(self.mask["bitarray2"][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id("J2000")
assert coosys.system == "eq_FK5"
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id("QUERY_STATUS")
assert info.value == "OK"
if self.votable.version != "1.1":
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..."
def test_repr(self):
assert "3 tables" in repr(self.votable)
assert (
repr(list(self.votable.iter_fields_and_params())[0])
== '<PARAM ID="awesome" arraysize="*" datatype="float" '
'name="INPUT" unit="deg" value="[0.0 0.0]"/>'
)
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == "[</>]"
# Table
assert repr(self.table).startswith("<VOTable")
class TestThroughTableData(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
def test_schema(self, tmp_path):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = tmp_path / "test_through_tabledata.xml"
with open(fn, "wb") as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, "1.1")
class TestThroughBinary(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.get_first_table().format = "binary"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask["bit"])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
class TestThroughBinary2(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.version = "1.3"
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
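    # Hedged round-trip sketch (not in the original helper): assuming ``parse``
    # accepts a seekable binary file-like object, as the other tests in this
    # module demonstrate, the serialized output can be read back in;
    # ``roundtrip`` is an illustrative name.
    out.seek(0)
    roundtrip = parse(io.BytesIO(out.getvalue().encode("utf-8")))
    assert len(roundtrip.get_first_table().array) == 2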
# https://github.com/astropy/astropy/issues/13341
@np.errstate(over="ignore")
def test_open_files():
for filename in get_pkg_data_filenames("data", pattern="*.xml"):
if filename.endswith("custom_datatype.xml") or filename.endswith(
"timesys_errors.xml"
):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename("data/too_many_columns.xml.gz"))
def test_build_from_scratch(tmp_path):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
tree.Field(
votable, ID="filename", name="filename", datatype="char", arraysize="1"
),
tree.Field(
votable, ID="matrix", name="matrix", datatype="double", arraysize="2x2"
),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmp_path / "new_votable.xml"))
votable = parse(str(tmp_path / "new_votable.xml"))
table = votable.get_first_table()
assert_array_equal(
table.array.mask,
np.array(
[
(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]]),
],
dtype=[("filename", "?"), ("matrix", "?", (2, 2))],
),
)
def test_validate(test_path_object=False):
"""
    test_path_object is needed for the test below, ``test_validate_path_object``,
    so that the file can be passed as a pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename("data/regression.xml")
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
@mock.patch("subprocess.Popen")
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {"communicate.return_value": ("ok", "ko"), "returncode": 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename("data/empty_table.xml"), xmllint=True)
def test_validate_path_object():
"""Validating when source is passed as path object (#4412)."""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(tmp_path / "regression.compressed.xml", "wb") as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(tmp_path / "regression.compressed.xml", "rb") as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == "test1.xml"
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename("data/regression.xml")
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == "win32":
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename("data/nonstandard_units.xml"))
assert isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename("data/nonstandard_units.xml"), unit_format="generic"
)
assert not isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = "t2"
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(
get_pkg_data_filename("data/no_resource.xml"), output, xmllint=False
)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/no_resource.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename("data/custom_datatype.xml"),
datatype_mapping={"bar": "int"},
)
table = votable.get_first_table()
assert table.array.dtype["foo"] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id("time_frame")
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == "TCB"
assert timesys.refposition == "BARYCENTER"
timesys = votable.get_timesys_by_id("mjd_origin")
assert timesys.timeorigin == "MJD-origin"
assert timesys.timescale == "TDB"
assert timesys.refposition == "EMBARYCENTER"
timesys = votable.get_timesys_by_id("jd_origin")
assert timesys.timeorigin == "JD-origin"
assert timesys.timescale == "TT"
assert timesys.refposition == "HELIOCENTER"
timesys = votable.get_timesys_by_id("no_origin")
assert timesys.timeorigin is None
assert timesys.timescale == "UTC"
assert timesys.refposition == "TOPOCENTER"
def test_timesys():
votable = parse(get_pkg_data_filename("data/timesys.xml"))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename("data/timesys.xml"))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename("data/timesys_errors.xml"), output, xmllint=False)
outstr = output.getvalue()
assert "E23: Invalid timeorigin attribute 'bad-origin'" in outstr
assert "E22: ID attribute is required for all TIMESYS elements" in outstr
assert "W48: Unknown attribute 'refposition_mispelled' on TIMESYS" in outstr
def test_get_infos_by_name():
vot = parse(
io.BytesIO(
b"""
<VOTABLE xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.4">
<RESOURCE type="results">
<INFO name="creator-name" value="Cannon, A."/>
<INFO name="creator-name" value="Fleming, W."/>
</RESOURCE>
</VOTABLE>"""
)
)
infos = vot.get_infos_by_name("creator-name")
assert [i.value for i in infos] == ["Cannon, A.", "Fleming, W."]
|
10376d4c2b6cff5b6667672c54cb46c0b686ff35d90d532efb39d4e52b2e2872 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table.
"""
import io
import os
import pathlib
import numpy as np
import pytest
from astropy.io.votable import conf, from_table, is_votable, tree, validate
from astropy.io.votable.exceptions import E25, W39, VOWarning
from astropy.io.votable.table import parse, writeto
from astropy.table import Column, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
from astropy.utils.data import (
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
)
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
@pytest.fixture
def home_is_data(monkeypatch):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path("data")
# For Unix
monkeypatch.setenv("HOME", path)
# For Windows
monkeypatch.setenv("USERPROFILE", path)
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def test_table(tmp_path):
# Read the VOTABLE
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
("string_test", {"datatype": "char", "arraysize": "*"}),
("string_test_2", {"datatype": "char", "arraysize": "10"}),
("unicode_test", {"datatype": "unicodeChar", "arraysize": "*"}),
("fixed_unicode_test", {"datatype": "unicodeChar", "arraysize": "10"}),
("string_array_test", {"datatype": "char", "arraysize": "4"}),
("unsignedByte", {"datatype": "unsignedByte"}),
("short", {"datatype": "short"}),
("int", {"datatype": "int"}),
("long", {"datatype": "long"}),
("double", {"datatype": "double"}),
("float", {"datatype": "float"}),
("array", {"datatype": "long", "arraysize": "2*"}),
("bit", {"datatype": "bit"}),
("bitarray", {"datatype": "bit", "arraysize": "3x2"}),
("bitvararray", {"datatype": "bit", "arraysize": "*"}),
("bitvararray2", {"datatype": "bit", "arraysize": "3x2*"}),
("floatComplex", {"datatype": "floatComplex"}),
("doubleComplex", {"datatype": "doubleComplex"}),
("doubleComplexArray", {"datatype": "doubleComplex", "arraysize": "*"}),
("doubleComplexArrayFixed", {"datatype": "doubleComplex", "arraysize": "2"}),
("boolean", {"datatype": "bit"}),
("booleanArray", {"datatype": "bit", "arraysize": "4"}),
("nulls", {"datatype": "int"}),
("nulls_array", {"datatype": "int", "arraysize": "2x2"}),
("precision1", {"datatype": "double"}),
("precision2", {"datatype": "double"}),
("doublearray", {"datatype": "double", "arraysize": "*"}),
("bitarray2", {"datatype": "bit", "arraysize": "16"}),
]
for field, (name, d) in zip(t.fields, field_types):
assert field.ID == name
assert (
field.datatype == d["datatype"]
        ), f'{name} expected {d["datatype"]} but got {field.datatype}'
if "arraysize" in d:
assert field.arraysize == d["arraysize"]
# W39: Bit values can not be masked
with pytest.warns(W39):
writeto(votable2, str(tmp_path / "through_table.xml"))
def test_read_from_tilde_path(home_is_data):
# Just test that these run without error for tilde-paths
path = os.path.join("~", "regression.xml")
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(path)
Table.read(path, format="votable", table_id="main_table")
def test_read_through_table_interface(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="main_table")
assert len(t) == 5
# Issue 8354
assert t["float"].format is None
fn = tmp_path / "table_interface.xml"
# W39: Bit values can not be masked
with pytest.warns(W39):
t.write(fn, table_id="FOO", format="votable")
with open(fn, "rb") as fd:
t2 = Table.read(fd, format="votable", table_id="FOO")
assert len(t2) == 5
def test_read_through_table_interface2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable", table_id="last_table")
assert len(t) == 0
def test_pass_kwargs_through_table_interface():
# Table.read() should pass on keyword arguments meant for parse()
filename = get_pkg_data_filename("data/nonstandard_units.xml")
t = Table.read(filename, format="votable", unit_format="generic")
assert t["Flux1"].unit == Unit("erg / (Angstrom cm2 s)")
def test_names_over_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
"Name",
"GLON",
"GLAT",
"RAdeg",
"DEdeg",
"Jmag",
"Hmag",
"Kmag",
"G3.6mag",
"G4.5mag",
"G5.8mag",
"G8.0mag",
"4.5mag",
"8.0mag",
"Emag",
"24mag",
"f_Name",
]
def test_explicit_ids():
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
"col1",
"col2",
"col3",
"col4",
"col5",
"col6",
"col7",
"col8",
"col9",
"col10",
"col11",
"col12",
"col13",
"col14",
"col15",
"col16",
"col17",
]
def test_table_read_with_unnamed_tables():
"""
Issue #927.
"""
with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd:
t = Table.read(fd, format="votable")
assert len(t) == 1
def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename("data/names.xml"))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
output = io.BytesIO()
t.write(output, format="votable")
def test_write_with_format():
t = Table()
c = Column(data=[1, 2, 3], name="a")
t.add_column(c)
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY" in obuff
assert b"TABLEDATA" not in obuff
output = io.BytesIO()
t.write(output, format="votable", tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b"BINARY2" in obuff
assert b"TABLEDATA" not in obuff
def test_write_overwrite(tmp_path):
t = simple_table(3, 3)
filename = tmp_path / "overwrite_test.vot"
t.write(filename, format="votable")
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format="votable")
t.write(filename, format="votable", overwrite=True)
def test_write_tilde_path(home_is_tmpdir):
fname = os.path.join("~", "output")
t = Table()
t["a"] = [1, 2, 3]
t.write(fname, format="votable", tabledata_format="binary")
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" in obuff
assert "TABLEDATA" not in obuff
@pytest.mark.parametrize("path_format", ["plain", "tilde"])
def test_writeto(path_format, tmp_path, home_is_tmpdir):
if path_format == "plain":
# pathlib.Path objects are not accepted by votable.writeto, so convert
# to a string
fname = str(tmp_path / "writeto_test.vot")
else:
fname = os.path.join("~", "writeto_test.vot")
t = Table()
t["a"] = [1, 2, 3]
vt = from_table(t)
writeto(vt, fname)
if path_format == "tilde":
# Ensure the tilde-prefixed path wasn't treated literally
assert not os.path.exists(fname)
with open(os.path.expanduser(fname)) as f:
obuff = f.read()
assert 'VOTABLE version="1.4"' in obuff
assert "BINARY" not in obuff
assert "TABLEDATA" in obuff
def test_empty_table():
votable = parse(get_pkg_data_filename("data/empty_table.xml"))
table = votable.get_first_table()
table.to_table()
def test_no_field_not_empty_table():
votable = parse(get_pkg_data_filename("data/no_field_not_empty_table.xml"))
table = votable.get_first_table()
assert len(table.fields) == 0
assert len(table.infos) == 1
def test_no_field_not_empty_table_exception():
with pytest.raises(E25):
parse(
get_pkg_data_filename("data/no_field_not_empty_table.xml"),
verify="exception",
)
def test_binary2_masked_strings():
"""
Issue #8995.
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename("data/binary2_masked_strings.xml"))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask["epoch_photometry_url"])
output = io.BytesIO()
astropy_table.write(output, format="votable")
def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename("data/regression.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False
def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable.
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename("data/valid_votable.xml")
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True
def test_validate_tilde_path(home_is_data):
validate(os.path.join("~", "valid_votable.xml"))
def test_is_votable_tilde_path(home_is_data):
assert is_votable(os.path.join("~", "valid_votable.xml"))
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
parse(get_pkg_data_filename("data/gemini.xml"))
# Then try the various explicit options
def test_verify_ignore(self):
parse(get_pkg_data_filename("data/gemini.xml"), verify="ignore")
def test_verify_warn(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"), verify="warn")
assert len(w) == 24
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"), verify="exception")
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp("verify", "ignore"):
parse(get_pkg_data_filename("data/gemini.xml"))
def test_conf_verify_warn(self):
with conf.set_temp("verify", "warn"):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename("data/gemini.xml"))
assert len(w) == 24
def test_conf_verify_exception(self):
with conf.set_temp("verify", "exception"):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename("data/gemini.xml"))
|
51bd59eb5be9fc04e7846af56d39e82e10e8d936a1b24968a9ea56d3f18e6d4f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
import hashlib
import http.client
import os
import pickle
import shutil
import socket
import subprocess
import urllib.error
import urllib.request
import warnings
from xml.parsers.expat import ExpatError
# VO
from astropy.io.votable import exceptions, table, xmlutil
class Result:
def __init__(self, url, root="results", timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, "rb") as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, "wb") as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if "network_error" not in self._attributes:
self["network_error"] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, "wb") as fd:
fd.write(f"FAILED: {reason}\n".encode())
self["network_error"] = reason
r = None
try:
r = urllib.request.urlopen(self.url.decode("ascii"), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, "reason"):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail(f"HTTPException: {e}")
return
except (socket.timeout, OSError) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, "wb") as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, "rb") as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self["version"] = ""
if "network_error" in self and self["network_error"] is not None:
self["nwarnings"] = 0
self["nexceptions"] = 0
self["warnings"] = []
self["xmllint"] = None
self["warning_types"] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, "rb") as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, verify="warn", filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self["version"] = version = t.version
else:
self["version"] = version = "1.0"
if "xmllint" not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
# OSError is raised when XML file eats all memory and
# system sends kill signal.
except OSError as e:
self["xmllint"] = None
self["xmllint_content"] = str(e)
else:
self["xmllint"] = success == 0
self["xmllint_content"] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w["is_warning"]:
nwarnings += 1
if w["is_exception"]:
nexceptions += 1
warning_types.add(w["warning"])
self["nwarnings"] = nwarnings
self["nexceptions"] = nexceptions
self["warnings"] = lines
self["warning_types"] = warning_types
def has_warning(self, warning_code):
return warning_code in self["warning_types"]
def match_expectations(self):
if "network_error" not in self:
self["network_error"] = None
if self["expected"] == "good":
return (
not self["network_error"]
and self["nwarnings"] == 0
and self["nexceptions"] == 0
)
elif self["expected"] == "incorrect":
return not self["network_error"] and (
self["nwarnings"] > 0 or self["nexceptions"] > 0
)
elif self["expected"] == "broken":
return self["network_error"] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
["java", "-jar", path_to_stilts_jar, "votlint", "validate=false", filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self["votlint"] = False
else:
self["votlint"] = True
self["votlint_content"] = stdout
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if x["nwarnings"] == 0 and x["nexceptions"] == 0 and x["xmllint"] is True:
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x["xmllint"] is False:
fail_schema.append(x)
if x["xmllint"] is False and x["nwarnings"] == 0 and x["nexceptions"] == 0:
schema_mismatch.append(x)
if "votlint" in x and x["votlint"] is False:
fail_votlint.append(x)
if "network_error" not in x:
x["network_error"] = None
if (
x["nwarnings"] == 0
and x["nexceptions"] == 0
and x["network_error"] is None
):
votlint_mismatch.append(x)
if "network_error" in x and x["network_error"] is not None:
network_failures.append(x)
version = x["version"]
if version == "1.0":
version_10.append(x)
elif version == "1.1":
version_11.append(x)
elif version == "1.2":
version_12.append(x)
else:
version_unknown.append(x)
if x["nwarnings"] > 0:
has_warnings.append(x)
for warning in x["warning_types"]:
if (
warning is not None
and len(warning) == 3
and warning.startswith("W")
):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x["nexceptions"] > 0:
has_exceptions.append(x)
for exc in x["warning_types"]:
if exc is not None and len(exc) == 3 and exc.startswith("E"):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
("all", "All tests", all_results),
("correct", "Correct", correct),
("unexpected", "Unexpected", not_expected),
("schema", "Invalid against schema", fail_schema),
(
"schema_mismatch",
"Invalid against schema/Passed vo.table",
schema_mismatch,
["ul"],
),
("fail_votlint", "Failed votlint", fail_votlint),
(
"votlint_mismatch",
"Failed votlint/Passed vo.table",
votlint_mismatch,
["ul"],
),
("network_failures", "Network failures", network_failures),
("version1.0", "Version 1.0", version_10),
("version1.1", "Version 1.1", version_11),
("version1.2", "Version 1.2", version_12),
("version_unknown", "Version unknown", version_unknown),
("warnings", "Warnings", has_warnings),
]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(
warning_code,
f"{warning_code}: {warning_descr}",
warning,
["ul", "li"],
)
)
tables.append(("exceptions", "Exceptions", has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(
exception_code,
f"{exception_code}: {exception_descr}",
exc,
["ul", "li"],
)
)
return tables
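# Sketch of consuming the subsets built above (hypothetical ``urls`` iterable
# of byte-string URLs and an existing ``results`` directory):
#
#     for name, title, results, *fmt in get_result_subsets(urls, "results"):
#         print(f"{name}: {title} ({len(results)} results)")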
|
9019216816216c76a6650d139fbfe55888212f915d82107d699ca3bf3fb1224a | from astropy.timeseries.periodograms.base import *
from astropy.timeseries.periodograms.bls import *
from astropy.timeseries.periodograms.lombscargle import *
from astropy.timeseries.periodograms.lombscargle_multiband import *
|
7b5eef685a3bb68e3bb8389a3e94a5af8c9dc13b5ad8824a40df5efec5dde533 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal
from astropy import units as u
from astropy.table import QTable, Table, join, vstack
from astropy.time import Time
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(["2016-03-22T12:30:31", "2015-01-21T12:30:32", "2016-03-22T12:30:40"])
PLAIN_TABLE = Table(
[[1.0, 2.0, 11.0], [3, 4, 1], ["x", "y", "z"]], names=["a", "b", "c"]
)
class CommonTimeSeriesTests:
def test_stacking(self):
ts = vstack([self.series, self.series])
assert isinstance(ts, self.series.__class__)
def test_row_slicing(self):
ts = self.series[:2]
assert isinstance(ts, self.series.__class__)
def test_row_indexing(self):
assert self.series[1][self.time_attr] == Time("2015-01-21T12:30:32")
assert self.series[self.time_attr][1] == Time("2015-01-21T12:30:32")
def test_column_indexing(self):
assert_equal(self.series["a"], [1, 2, 11])
def test_column_slicing_notime(self):
tab = self.series["a", "b"]
assert not isinstance(tab, self.series.__class__)
assert isinstance(tab, QTable)
def test_add_column(self):
self.series["d"] = [1, 2, 3]
def test_add_row(self):
self.series.add_row(self._row)
def test_set_unit(self):
self.series["d"] = [1, 2, 3]
self.series["d"].unit = "s"
def test_replace_column(self):
self.series.replace_column("c", [1, 3, 4])
def test_required_after_stacking(self):
# When stacking, we have to temporarily relax the checking of the
# columns in the time series, but we need to make sure that the
# checking works again afterwards
ts = vstack([self.series, self.series])
with pytest.raises(ValueError, match=r"TimeSeries object is invalid"):
ts.remove_columns(ts.colnames)
def test_join(self):
ts_other = self.series.copy()
ts_other.add_row(self._row)
ts_other["d"] = [11, 22, 33, 44]
ts_other.remove_columns(["a", "b"])
ts = join(self.series, ts_other)
assert len(ts) == len(self.series)
ts = join(self.series, ts_other, join_type="outer")
assert len(ts) == len(ts_other)
class TestTimeSeries(CommonTimeSeriesTests):
_row = {"time": "2016-03-23T12:30:40", "a": 1.0, "b": 2, "c": "a"}
def setup_method(self, method):
self.series = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
self.time_attr = "time"
def test_column_slicing(self):
ts = self.series["time", "a"]
assert isinstance(ts, TimeSeries)
class TestBinnedTimeSeries(CommonTimeSeriesTests):
_row = {
"time_bin_start": "2016-03-23T12:30:40",
"time_bin_size": 2 * u.s,
"a": 1.0,
"b": 2,
"c": "a",
}
def setup_method(self, method):
self.series = BinnedTimeSeries(
time_bin_start=INPUT_TIME, time_bin_size=3 * u.s, data=PLAIN_TABLE
)
self.time_attr = "time_bin_start"
def test_column_slicing(self):
ts = self.series["time_bin_start", "time_bin_size", "a"]
assert isinstance(ts, BinnedTimeSeries)
|
75e28e658d91d54a2a76e384ec81abd81787448a44cd49eab53c850d391007a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.base import BasePeriodogram
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from . import methods
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
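# Behaviour sketch for ``validate_unit_consistency`` (illustrative values):
# with a unitful reference the input is coerced to the reference's unit,
# e.g. validate_unit_consistency(1.0 * u.day, 24 * u.hour) -> <Quantity 1. d>;
# with a unitless reference a dimensionless Quantity is stripped to its bare
# value, e.g. validate_unit_consistency(np.ones(3), 2 * u.one) -> 2.0.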
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram.
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like, or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.default_rng(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
>>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.standard_normal(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.000412388152837
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543])
If the inputs are AstroPy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
        # so they can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to("day")
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, (Time, TimeDelta)):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(
self,
duration,
minimum_period=None,
maximum_period=None,
minimum_n_transit=3,
frequency_factor=1.0,
):
"""Determine a suitable grid of periods.
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
some user's needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
If ``maximum_period`` is not provided, this is used to compute the
            maximum period to search by asserting that any system with at
            least ``minimum_n_transit`` transits will fall within the range of
            searched periods. Note that this is not the same as requiring
            ``minimum_n_transit`` transits for detection. The default value
            is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any system with at least ``minimum_n_transit`` transits
        falls within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit - 1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0 / strip_units(maximum_period)
maximum_frequency = 1.0 / strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
return 1.0 / (maximum_frequency - df * np.arange(nf)) * self._t_unit()
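    # Worked sketch of the heuristics above (illustrative numbers): for a
    # 10 day baseline, duration = 0.16 d and frequency_factor = 1.0, the
    # spacing is df = 0.16 / 10**2 = 1.6e-3 / d, the default minimum_period
    # is 2 * 0.16 = 0.32 d and, with minimum_n_transit = 3, the default
    # maximum_period is 10 / 2 = 5 d, giving roughly
    # (1 / 0.32 - 1 / 5) / 1.6e-3 + 1 ~ 1830 trial periods.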
def autopower(
self,
duration,
objective=None,
method=None,
oversample=10,
minimum_n_transit=3,
minimum_period=None,
maximum_period=None,
frequency_factor=1.0,
):
"""Compute the periodogram at set of heuristically determined periods.
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(
duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor,
)
return self.power(
period, duration, objective=objective, method=method, oversample=oversample
)
def power(self, period, duration, objective=None, method=None, oversample=10):
"""Compute the periodogram for a set of periods.
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(
f"Unrecognized method '{objective}'\n"
f"allowed methods are: {allowed_objectives}"
)
use_likelihood = objective == "likelihood"
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(
f"Unrecognized method '{method}'\n"
f"allowed methods are: {allowed_methods}"
)
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period), dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration), dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref,
y - np.median(y),
ivar,
period_fmt,
duration,
oversample,
use_likelihood,
)
return self._format_results(t_ref, objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to("day")
if self._tstart is None:
if isinstance(times, Time):
raise TypeError(
f"{name} was provided as an absolute time but "
"the BoxLeastSquares class was initialized "
"with relative times."
)
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError(
f"{name} was provided as a relative time but "
"the BoxLeastSquares class was initialized "
"with absolute times."
)
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
            # transit times of, e.g., 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase.
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t_model = strip_units(self._as_relative_time("t_model", t_model))
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Compute the depth
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model - transit_time + hp) % period - hp) < 0.5 * duration
y_model[m_model] = y_in
return y_model * self._y_unit()
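    # Usage sketch for ``model`` (assumed arrays t, y; names mirror the
    # class-level example):
    #
    #     bls = BoxLeastSquares(t, y)
    #     results = bls.autopower(0.16)
    #     best = np.argmax(results.power)
    #     y_fit = bls.model(t, results.period[best], results.duration[best],
    #                       results.transit_time[best])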
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model.
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
        # This is a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
m_odd = np.abs((t - transit_time) % (2 * period) - period) < 0.5 * duration
m_even = (
np.abs((t - transit_time + period) % (2 * period) - period) < 0.5 * duration
)
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t - transit_time) % period - hp) < 0.5 * duration
depth_phase = _compute_depth(m_phase, *_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = (
np.abs((t - transit_time + 0.25 * period) % (0.5 * period) - 0.25 * period)
< 0.5 * duration
)
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in] - transit_time) / period).astype(int)
transit_times = (
period * np.arange(transit_id.min(), transit_id.max() + 1) + transit_time
)
unique_ids, unique_counts = np.unique(transit_id, return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in) ** 2 - (y[m_in] - y_out) ** 2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5 * np.sum(ivar[m_in] * (y[m_in] - y_in) ** 2)
full_ll -= 0.5 * np.sum(ivar[m_out] * (y[m_out] - y_out) ** 2)
# Compute the log likelihood of a sine model
A = np.vstack(
(
np.sin(2 * np.pi * t / period),
np.cos(2 * np.pi * t / period),
np.ones_like(t),
)
).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]), np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5 * np.sum((y - mod) ** 2 * ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed(
"transit_times", transit_times * self._t_unit()
),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2] ** 2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
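    # Sketch: vetting statistics for the best-fit parameters from the usage
    # example above (assumed names):
    #
    #     stats = bls.compute_stats(results.period[best],
    #                               results.duration[best],
    #                               results.transit_time[best])
    #     print(stats["depth"], stats["harmonic_delta_log_likelihood"])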
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
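        Examples
        --------
        A minimal sketch with hypothetical input arrays::
            >>> import numpy as np
            >>> from astropy.timeseries import BoxLeastSquares
            >>> t = np.linspace(0, 20, 500)
            >>> y = np.ones_like(t)
            >>> model = BoxLeastSquares(t, y)
            >>> mask = model.transit_mask(t, period=3.0, duration=0.2, transit_time=1.0)
            >>> mask.shape == t.shape
            True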
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t = strip_units(self._as_relative_time("t", t))
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
hp = 0.5 * period
return np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like, or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations.
Parameters
----------
period : float, array-like, or `~astropy.units.Quantity` ['time']
The set of test periods.
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity` ['time']
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError(
"The maximum transit duration must be shorter than the minimum period"
)
return period, duration
def _format_results(self, t_ref, objective, period, results):
"""A private method used to wrap and add units to the periodogram.
Parameters
----------
t_ref : float
The minimum time in the time series (a reference time).
objective : str
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity` ['time']
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(
power,
depth,
depth_err,
duration,
transit_time,
depth_snr,
log_likelihood,
) = results
transit_time += t_ref
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed(
"transit_time", transit_time
)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective,
period,
power,
depth,
depth_err,
duration,
transit_time,
depth_snr,
log_likelihood,
)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search.
Attributes
----------
objective : str
        The scalar objective that was optimized to find the best-fit phase,
        duration, and depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
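    Examples
    --------
    The results behave like a dictionary with attribute access, so, assuming
    ``results`` was returned by :func:`BoxLeastSquares.power` and ``np`` is
    NumPy, the following two lines are equivalent::
        best_period = results.period[np.argmax(results.power)]
        best_period = results["period"][np.argmax(results["power"])]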
"""
def __init__(self, *args):
super().__init__(
zip(
(
"objective",
"period",
"power",
"depth",
"depth_err",
"duration",
"transit_time",
"depth_snr",
"log_likelihood",
),
args,
)
)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return "\n".join(
[k.rjust(m) + ": " + repr(v) for k, v in sorted(self.items())]
)
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
|
aa8607008c428f24428bc92c55f0a90a0e4ddddd54fc45eb4579883445619c32 | """Main Lomb-Scargle Multiband Implementation"""
import numpy as np
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.sampled import TimeSeries
from .implementations import available_methods, lombscargle_multiband
from .implementations.mle import construct_regularization, design_matrix, periodic_fit
__all__ = ["LombScargleMultiband"]
def has_units(obj):
return hasattr(obj, "unit")
def get_unit(obj):
return getattr(obj, "unit", 1)
def strip_units(*arrs):
strip = lambda a: None if a is None else np.asarray(a)
if len(arrs) == 1:
return strip(arrs[0])
else:
return map(strip, arrs)
class LombScargleMultiband(LombScargle):
"""Compute the Lomb-Scargle Periodogram.
This implementation is based on code presented in [1]_ and [2]_;
if you use this functionality in an academic application, citation of
those works would be appreciated.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
sequence of observation times
y : array-like or `~astropy.units.Quantity`
sequence of observations associated with times t
bands : array-like
sequence of passband labels associated with times t, each unique label
defines a single band of data.
dy : float, array-like, or `~astropy.units.Quantity`, optional
error or sequence of observational errors associated with times t
normalization : {'standard', 'model', 'log', 'psd'}, optional
Normalization to use for the periodogram.
nterms_base : int, optional
number of frequency terms to use for the base model common to all bands.
In the case of the fast algorithm, this parameter is passed along to
the single band LombScargle method as the ``nterms`` parameter.
nterms_band : int, optional
number of frequency terms to use for the residuals between the base
model and each individual band
reg_base : float or None (default = None)
amount of regularization to use on the base model parameters
reg_band : float or None (default = 1E-6)
amount of regularization to use on the band model parameters
regularize_by_trace : bool (default = True)
if True, then regularization is expressed in units of the trace of
the normal matrix
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage. Only applicable to the "fast" method
References
----------
.. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to
astroML: Machine learning for astrophysics*. Proceedings of the
Conference on Intelligent Data Understanding (2012)
.. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical
Time Series*. ApJ 812.1:18 (2015)
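    Examples
    --------
    A minimal sketch with hypothetical two-band data (importing from the
    implementation submodule, as the accompanying tests do)::
        >>> import numpy as np
        >>> from astropy.timeseries.periodograms.lombscargle_multiband import LombScargleMultiband
        >>> rng = np.random.default_rng(42)
        >>> t = 10 * rng.random(100)
        >>> y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(100)
        >>> bands = np.where(np.arange(100) < 50, "g", "r")
        >>> frequency, power = LombScargleMultiband(t, y, bands).autopower()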
"""
available_methods = available_methods()
def __init__(
self,
t,
y,
bands,
dy=None,
normalization="standard",
nterms_base=1,
nterms_band=1,
reg_base=None,
reg_band=1e-6,
regularize_by_trace=True,
center_data=True,
fit_mean=True,
):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to("day")
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.bands, self.dy = self._validate_inputs(
trel, y, bands, dy
)
self.normalization = normalization
self.nterms_base = nterms_base
self.nterms_band = nterms_band
self.reg_base = reg_base
self.reg_band = reg_band
self.regularize_by_trace = regularize_by_trace
self.center_data = center_data
self.fit_mean = fit_mean
self.nterms = self.nterms_base # determined by the base model params
@classmethod
def from_timeseries(
cls,
timeseries,
signal_column=None,
uncertainty_column=None,
band_labels=None,
**kwargs,
):
"""
Initialize a multiband periodogram from a time series object.
If a binned time series is passed, the time at the center of the bins is
used. Also note that this method automatically gets rid of NaN/undefined
values when initializing the periodogram.
Parameters
----------
signal_column : list
The names of columns containing the signal values to use.
uncertainty_column : list, optional
The names of columns containing the errors on the signal.
band_labels : list, optional
            The labels for each band, matched by index. If None, uses the
labels of ``signal_column`` as band names.
**kwargs
Additional keyword arguments are passed to the initializer for this
periodogram class.
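        Examples
        --------
        A sketch, assuming ``ts`` is a `~astropy.timeseries.TimeSeries` with
        hypothetical flux and error columns for two bands::
            ls = LombScargleMultiband.from_timeseries(
                ts,
                signal_column=["g_flux", "r_flux"],
                uncertainty_column=["g_err", "r_err"],
                band_labels=["g", "r"],
            )
            frequency, power = ls.autopower()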
"""
if signal_column is None:
raise ValueError(
"signal_column_name should be set to a list of valid column names"
)
if band_labels is not None:
if len(band_labels) != len(signal_column):
raise ValueError(
"band_labels have an equal number of elements to signal_column"
)
else:
band_labels = signal_column # use the flux labels as band labels
if isinstance(timeseries, TimeSeries):
time = timeseries.time
elif isinstance(timeseries, BinnedTimeSeries):
time = timeseries.time_bin_center
else:
raise TypeError(
"Input time series should be an instance of "
"TimeSeries or BinnedTimeSeries"
)
# Build lombscargle_multiband inputs
t = []
y = []
dy = []
band = []
for i, col in enumerate(signal_column):
if np.ma.is_masked(timeseries[col]):
signal_mask = ~timeseries[col].mask
else:
signal_mask = ~np.isnan(timeseries[col])
if uncertainty_column is not None:
dy_col = timeseries[uncertainty_column[i]]
if np.ma.is_masked(dy_col):
signal_mask &= ~dy_col.mask
else:
signal_mask &= ~np.isnan(dy_col)
t.append(time[signal_mask].mjd * u.day)
y.append(timeseries[col][signal_mask])
band.append([band_labels[i]] * sum(signal_mask))
            if uncertainty_column is not None:
                dy.append(timeseries[uncertainty_column[i]][signal_mask])
t = np.hstack(t)
y = np.hstack(y)
band = np.hstack(band)
        if uncertainty_column is not None:
            dy = np.hstack(dy)
            if len(dy) == 0:
                dy = None
        else:
            dy = None
return cls(t, y, band, dy=dy, **kwargs)
def _validate_inputs(self, t, y, bands, dy):
# Validate shapes of inputs
if dy is None:
t, y, bands = np.broadcast_arrays(t, y, bands, subok=True)
else:
t, y, bands, dy = np.broadcast_arrays(t, y, bands, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, bands, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if any(has_units(arr) for arr in (t, y, bands, dy)):
t, y = map(u.Quantity, (t, y))
if dy is not None:
dy = u.Quantity(dy)
try:
dy = u.Quantity(dy, unit=y.unit)
except u.UnitConversionError:
raise ValueError("Units of dy not equivalent " "to units of y")
return t, y, bands, dy
def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
            The heuristically-determined optimal frequency grid
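        Examples
        --------
        The heuristic itself is simple; with ``baseline`` the total time span
        of the data and ``n_samples`` the number of points, the defaults
        amount to::
            df = 1 / baseline / samples_per_peak
            minimum_frequency = 0.5 * df
            maximum_frequency = nyquist_factor * 0.5 * n_samples / baseline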
"""
if hasattr(self._trel, "unit"):
unit = self._trel.unit
trel = self._trel.to(u.day) # do frequency calculations in days
else:
trel = self._trel
baseline = trel.max() - trel.min()
n_samples = trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
# Convert back to the input units
if hasattr(self._trel, "unit"):
df = df.to(1 / unit)
minimum_frequency = minimum_frequency.to(1 / unit)
maximum_frequency = maximum_frequency.to(1 / unit)
n_freq = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (n_freq - 1)
else:
return minimum_frequency + df * np.arange(n_freq)
def autopower(
self,
method="flexible",
sb_method="auto",
normalization="standard",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
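        Examples
        --------
        A sketch, assuming ``ls`` is an initialized `LombScargleMultiband` and
        ``np`` is NumPy::
            frequency, power = ls.autopower(method="fast", sb_method="fastchi2")
            best_frequency = frequency[np.argmax(power)]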
"""
frequency = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
)
power = self.power(
frequency, method=method, sb_method=sb_method, normalization=normalization
)
return frequency, power
def power(
self, frequency, method="flexible", sb_method="auto", normalization="standard"
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options can be found
in `~astropy.timeseries.LombScargle`.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
f_shape = np.shape(frequency)
power = lombscargle_multiband(
strip_units(self._trel),
strip_units(self.y),
strip_units(self.bands),
dy=strip_units(self.dy),
frequency=strip_units(np.ravel(frequency)),
method=method,
sb_method=sb_method,
normalization=normalization,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
reg_base=self.reg_base,
reg_band=self.reg_band,
regularize_by_trace=self.regularize_by_trace,
center_data=self.center_data,
fit_mean=self.fit_mean,
)
return np.reshape(power * self._power_unit(normalization), f_shape)
def design_matrix(self, frequency, t_fit=None, bands_fit=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
ndarray
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
if t_fit is None:
t_fit, dy = strip_units(self._trel, self.dy)
else:
t_fit, dy = strip_units(
self._validate_t(self._as_relative_time("t", t_fit)), None
)
if bands_fit is None:
bands_fit = np.unique(self.bands)
        elif isinstance(bands_fit, str):
            bands_fit = [bands_fit]
        bands_fit = np.asarray(bands_fit)
        unique_bands = np.unique(self.bands)
        unique_bands_fit = np.unique(bands_fit)
        bands_fit = bands_fit[:, np.newaxis]
if not set(unique_bands_fit).issubset(set(unique_bands)):
raise ValueError(
"bands_fit does not match training data: "
f"input: {set(unique_bands_fit)} output: {set(unique_bands)}"
)
t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit)
return design_matrix(
t_fit.ravel(),
bands_fit.ravel(),
frequency,
dy,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
)
def model(self, t, frequency, bands_fit=None):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
bands_fit : list or array-like
the unique bands to fit in the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times.
Will have shape (``n_bands``,``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
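        Examples
        --------
        A sketch, assuming ``ls`` is an initialized instance and ``np`` is
        NumPy::
            t_fit = np.linspace(0, 10, 200)
            y_fit = ls.model(t_fit, frequency=1.0)
            # y_fit has shape (n_bands, 200): one model curve per band in the input data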
"""
if bands_fit is None:
bands_fit = np.unique(self.bands)
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time("t", t))
y_fit = periodic_fit(
*strip_units(self._trel, self.y, self.dy),
bands=self.bands,
frequency=strip_units(frequency),
t_fit=strip_units(t),
bands_fit=bands_fit,
center_data=self.center_data,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
reg_base=self.reg_base,
reg_band=self.reg_band,
regularize_by_trace=self.regularize_by_trace,
)
return y_fit * get_unit(self.y)
def offset(self, t_fit=None, bands_fit=None):
"""Return the offset array of the model
The offset array of the model is the (weighted) mean of the y values in each band.
Note that if self.center_data is False, the offset is 0 by definition.
Parameters
----------
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
offset : array
See Also
--------
design_matrix
model
model_parameters
"""
if bands_fit is None:
bands_fit = np.unique(self.bands)
if t_fit is None:
on_fit = False
t_fit = self.t
else:
on_fit = True
bands_fit = bands_fit[:, np.newaxis]
unique_bands = np.unique(self.bands)
unique_bands_fit = np.unique(bands_fit)
if not set(unique_bands_fit).issubset(set(unique_bands)):
raise ValueError(
"bands_fit does not match training data: "
f"input: {set(unique_bands_fit)} output: {set(unique_bands)}"
)
y, dy = strip_units(self.y, self.dy)
        # no need to broadcast if the bands already map one-to-one onto timestamps
        if np.shape(t_fit) != np.shape(bands_fit):
t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit)
# need to make sure all unique filters are represented
u, i = np.unique(
np.concatenate([bands_fit.ravel(), unique_bands]), return_inverse=True
)
if not self.center_data:
return 0
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
# Calculate ymeans -- per band
ymeans = np.zeros(y.shape) # filter specific means
for band in unique_bands:
mask = self.bands == band
ymeans[mask] = np.average(y[mask], weights=1 / dy[mask] ** 2)
ymeans_fit = ymeans[i[: -len(unique_bands)]]
if on_fit:
return ymeans_fit * get_unit(self.y)
else:
return ymeans * get_unit(self.y)
def model_parameters(self, frequency, units=True):
r"""Compute the best-fit model parameters at the given frequency.
The model described by these parameters is:
.. math::
            y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms_base} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)]
            + \theta_0^{(k)} + \sum_{n=1}^{\tt nterms_band} [\theta_{2n-1}^{(k)}\sin(2\pi n f t) + \theta_{2n}^{(k)}\cos(2\pi n f t)]
where :math:`\vec{\theta}` is the array of parameters returned by this function.
Parameters
----------
frequency : float
the frequency for the model
units : bool
            If True (default), return the parameters in the units of the input data.
Returns
-------
theta : np.ndarray (n_parameters,)
The best-fit model parameters at the given frequency.
See Also
--------
design_matrix
model
offset
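        Examples
        --------
        The returned parameters combine with :meth:`design_matrix` and
        :meth:`offset` to reproduce :meth:`model`; a sketch, assuming ``ls`` is
        an initialized instance, ``frequency`` is a scalar, and ``t_fit`` is an
        array of times::
            theta = ls.model_parameters(frequency)
            X = ls.design_matrix(frequency, t_fit=t_fit)
            y_fit = ls.offset(t_fit=t_fit) + X.dot(theta)
            # y_fit is flattened over bands; reshape to (n_bands, len(t_fit))
            # to compare with ls.model(t_fit, frequency)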
"""
frequency = self._validate_frequency(frequency)
t, y, dy = strip_units(self._trel, self.y, self.dy)
if self.center_data:
y = y - strip_units(self.offset())
dy = np.ones_like(y) if dy is None else np.asarray(dy)
X = design_matrix(
t,
self.bands,
frequency,
dy=dy,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
)
regularization = construct_regularization(
self.bands,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
reg_base=self.reg_base,
reg_band=self.reg_band,
)
M = np.dot(X.T, X)
if regularization is not None:
            # diag is a view into M, so the in-place additions below modify M directly
            diag = M.ravel(order="K")[:: M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.sum() * np.asarray(regularization)
else:
diag += np.asarray(regularization)
try:
parameters = np.linalg.solve(M, np.dot(X.T, y / dy))
except np.linalg.LinAlgError:
            parameters = np.linalg.lstsq(M, np.dot(X.T, y / dy), rcond=None)[0]
if units:
parameters = get_unit(self.y) * parameters
return parameters
def false_alarm_probability(self):
"""Not Implemented"""
raise NotImplementedError
def false_alarm_level(self):
"""Not Implemented"""
raise NotImplementedError
def distribution(self):
"""Not Implemented"""
raise NotImplementedError
|
43ce43f8c07c1c23b8db5592a6b8c0491dd12d33b1e3bd96fa6a06fa2b7bfbdc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .core import LombScargleMultiband
|
497ed8e58b0207dd6bc4e4455b0a34bef3dbdac23ee630da7d2ccffae72e0ec4 | """
Main Lomb-Scargle Multiband Implementation
The ``lombscarglemultiband`` function here is essentially a switch
statement for the various implementations available in this submodule
"""
__all__ = ["lombscargle_multiband", "available_methods"]
from .mbfast_impl import lombscargle_mbfast
from .mbflex_impl import lombscargle_mbflex
def available_methods():
methods = ["fast", "flexible"]
return methods
def lombscargle_multiband(
t,
y,
bands,
dy=None,
frequency=None,
method="flexible",
sb_method="auto",
assume_regular_frequency=False,
normalization="standard",
fit_mean=True,
center_data=True,
method_kwds=None,
nterms_base=1,
nterms_band=1,
reg_base=None,
reg_band=1e-6,
regularize_by_trace=True,
fit_period=False,
):
methods = available_methods()
if method == "flexible":
power = lombscargle_mbflex(
t,
y,
bands,
frequency,
dy=dy,
nterms_base=nterms_base,
nterms_band=nterms_band,
reg_base=reg_base,
reg_band=reg_band,
regularize_by_trace=regularize_by_trace,
center_data=center_data,
)
elif method == "fast":
power = lombscargle_mbfast(
t,
y,
bands,
dy,
frequency=frequency,
sb_method=sb_method,
assume_regular_frequency=assume_regular_frequency,
normalization=normalization,
fit_mean=fit_mean,
center_data=center_data,
method_kwds=method_kwds,
nterms=nterms_base,
) # nterms_base used for single_band nterms
if method not in methods:
raise ValueError(f"Invalid Method: {method}")
return power
|
462a50d3aad2f1096718aad353b0220f20ced917a8506699887e648fd9388495 | import numpy as np
__all__ = ["lombscargle_mbflex"]
def lombscargle_mbflex(
t,
y,
bands,
frequency,
dy=None,
nterms_base=1,
nterms_band=1,
reg_base=None,
reg_band=1e-6,
regularize_by_trace=True,
center_data=True,
):
if (nterms_base == 0) and (nterms_band == 0):
raise ValueError(
"At least one of nterms_base and nterms_band must be greater than 0."
)
# Inputs
unique_bands = np.unique(bands)
t = np.asarray(t)
y = np.asarray(y)
bands = np.asarray(bands)
frequency = np.asarray(frequency)
# Create a ones array for dy (errors) if not provided
if dy is not None:
dy = np.asarray(dy)
else:
dy = np.ones(y.shape)
# Calculate ymeans -- per band/filter
ymeans = np.zeros(
y.shape
) # An array of shape y, with each index given a filter specific mean
for band in unique_bands:
mask = bands == band
ymeans[mask] = np.average(y[mask], weights=1 / dy[mask] ** 2)
# Construct weighted y matrix
if center_data:
y = y - ymeans
    yw = y / dy  # weighted by dy, as above; an array of ones if errors were not provided
# Construct Regularization
if reg_base is None and reg_band is None:
regularization = 0
else:
n_base = 1 + 2 * nterms_base
n_band = 1 + 2 * nterms_band
regularization = np.zeros(n_base + len(unique_bands) * n_band)
if reg_base is not None:
regularization[:n_base] = reg_base
if reg_band is not None:
regularization[n_base:] = reg_band
# Scoring
omegas = 2 * np.pi * frequency
# Calculate chi-squared
chi2_0 = np.dot(yw.T, yw)
chi2_ref = np.copy(chi2_0) # reference chi2 for later comparison
# Iterate through the omegas and compute the power for each
chi2_0_minus_chi2 = []
for i, omega in enumerate(omegas.flat):
# Construct X - design matrix of the stationary sinusoid model
cols = [np.ones(len(t))]
cols = sum(
(
[np.sin((i + 1) * omega * t), np.cos((i + 1) * omega * t)]
for i in range(nterms_base)
),
cols,
)
# Add band columns for the multiband model, binary flag indicating the band of a given observation
for band in unique_bands:
cols.append(np.ones(len(t)))
cols = sum(
(
[np.sin((i + 1) * omega * t), np.cos((i + 1) * omega * t)]
for i in range(nterms_band)
),
cols,
)
mask = bands == band
for i in range(-1 - 2 * nterms_band, 0):
cols[i][~mask] = 0
X = np.transpose(np.vstack(cols) / dy) # weighted
M = np.dot(X.T, X)
if regularization is not None:
diag = M.ravel(order="K")[
:: M.shape[0] + 1
] # M is being affected by operations on diag
if regularize_by_trace:
diag += diag.sum() * np.asarray(regularization)
else:
diag += np.asarray(regularization)
# Construct Xw, XTX, XTy
Xw = X
XTX = M
XTy = np.dot(Xw.T, yw)
# Matrix Algebra to calculate the Lomb-Scargle power at each omega step
try:
chi2_0_minus_chi2.append(np.dot(XTy.T, np.linalg.solve(XTX, XTy)))
# If X'X is not invertible, use pseudoinverse instead
except np.linalg.LinAlgError:
chi2_0_minus_chi2.append(
np.dot(XTy.T, np.linalg.lstsq(XTX, XTy, rcond=None)[0])
)
# Construct and return the power from the chi2 difference
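    # chi2_0_minus_chi2[i] holds chi2_0 - chi2(f_i), i.e. the improvement in chi-squared
    # of the sinusoid model over the reference model, normalized here by chi2_ref.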
if center_data:
P = chi2_0_minus_chi2 / chi2_ref
else:
P = 1 + (chi2_0_minus_chi2 - chi2_0) / chi2_ref
return P
|
ae08a08f0c7ab6087b9f3c115d8137119e7e8a0d59ccd05330a7d9523bfcf2cc | """Various implementations of the Multiband Lomb-Scargle Periodogram"""
from .main import available_methods, lombscargle_multiband
from .mbfast_impl import lombscargle_mbfast
from .mbflex_impl import lombscargle_mbflex
|
0cee1ba5d9eb8daea45f5605e20b768129a6709eb794ba2f954deed4c64029a4 | import numpy as np
from astropy.timeseries.periodograms.lombscargle.implementations import lombscargle
__all__ = ["lombscargle_mbfast"]
def lombscargle_mbfast(
t,
y,
bands,
dy=None,
frequency=None,
sb_method="auto",
assume_regular_frequency=False,
normalization="standard",
fit_mean=True,
center_data=True,
method_kwds=None,
nterms=1,
):
# create masks for each filter/bandpass
unique_bands = np.unique(bands)
masks = [(bands == band) for band in unique_bands]
# calculate singleband powers for each filter/bandpass
if dy is not None:
models = [
lombscargle(
t[mask],
y[mask],
dy=dy[mask],
frequency=frequency,
method=sb_method,
assume_regular_frequency=assume_regular_frequency,
normalization=normalization,
fit_mean=fit_mean,
center_data=center_data,
method_kwds=method_kwds,
nterms=nterms,
)
for mask in masks
]
else:
models = [
lombscargle(
t[mask],
y[mask],
dy=None,
frequency=frequency,
method=sb_method,
assume_regular_frequency=assume_regular_frequency,
normalization=normalization,
fit_mean=fit_mean,
center_data=center_data,
method_kwds=method_kwds,
nterms=nterms,
)
for mask in masks
]
# Total score is the sum of powers weighted by chi2-normalization
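    # Each band's weight is its summed squared single-band power, normalized so the
    # weights sum to one: P_total(f) = sum_b w_b * P_b(f).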
powers = np.array(models)
chi2_0 = np.array([np.sum(model**2) for model in models])
return np.dot(chi2_0 / chi2_0.sum(), powers)
|
b28ba7cf94204b54ba5123dfdc9e09ec00c4f969370a70cff5bd951b0a5d974f | import numpy as np
__all__ = ["design_matrix", "construct_regularization", "periodic_fit"]
def design_matrix(t, bands, frequency, dy=None, nterms_base=1, nterms_band=1):
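    """Construct the multiband design matrix at a single frequency.
    The returned matrix has shape
    ``(len(t), 1 + 2*nterms_base + n_bands*(1 + 2*nterms_band))``, where ``n_bands``
    is the number of unique entries in ``bands``: a bias column plus sin/cos columns
    for the shared base model, followed, for each band, by a bias plus sin/cos block
    that is zeroed outside that band's observations. If ``dy`` is given, each row is
    divided by the corresponding uncertainty.
    """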
t = np.asarray(t)
omega = np.asarray(2 * np.pi * frequency)
unique_bands = np.unique(bands)
# Construct X - design matrix of the stationary sinusoid model
cols = [np.ones(len(t))]
cols = sum(
(
[np.sin((i + 1) * omega * t), np.cos((i + 1) * omega * t)]
for i in range(nterms_base)
),
cols,
)
# Add band columns for the multiband model, binary flag indicating the band of a given observation
for band in unique_bands:
cols.append(np.ones(len(t))) # A bias column is added by default
cols = sum(
(
[np.sin((i + 1) * omega * t), np.cos((i + 1) * omega * t)]
for i in range(nterms_band)
),
cols,
)
mask = bands == band
for i in range(-1 - 2 * nterms_band, 0):
cols[i][~mask] = 0
if dy is not None:
XT = np.transpose(np.vstack(cols) / dy) # weighted
else:
XT = np.transpose(np.vstack(cols))
return XT
def construct_regularization(
bands, nterms_base=1, nterms_band=1, reg_base=None, reg_band=1e-6
):
unique_bands = np.unique(bands)
# Construct Regularization
if reg_base is None and reg_band is None:
regularization = 0
else:
Nbase = 1 + 2 * nterms_base
Nband = 1 + 2 * nterms_band
regularization = np.zeros(Nbase + len(unique_bands) * Nband)
if reg_base is not None:
regularization[:Nbase] = reg_base
if reg_band is not None:
regularization[Nbase:] = reg_band
return regularization
def periodic_fit(
t,
y,
dy,
bands,
frequency,
t_fit,
bands_fit,
center_data=True,
nterms_base=1,
nterms_band=1,
reg_base=None,
reg_band=1e-6,
regularize_by_trace=True,
):
"""Compute the Lomb-Scargle model fit at a given frequency
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
bands : str, or array-like
The bands of each observation
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
    bands_fit : array-like
        The bands at which the fit should be computed (a subset of ``bands``)
    center_data : bool (default=True)
        If True, center the input data before applying the fit
    nterms_base, nterms_band : int (default=1)
        The number of Fourier terms to include in the base model and in the
        per-band residual model, respectively
    reg_base, reg_band : float or None
        The amount of regularization to apply to the base and band model parameters
    regularize_by_trace : bool (default=True)
        If True, express the regularization in units of the trace of the normal matrix
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
"""
t, y, bands, frequency = map(np.asarray, (t, y, bands, frequency))
bands_fit = bands_fit[:, np.newaxis]
unique_bands = np.unique(bands)
unique_bands_fit = np.unique(bands_fit)
if not set(unique_bands_fit).issubset(set(unique_bands)):
raise ValueError(
"bands_fit does not match training data: "
f"input: {set(unique_bands_fit)} output: {set(unique_bands)}"
)
t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit)
if dy is None:
dy = np.ones_like(y)
else:
dy = np.asarray(dy)
if t.ndim != 1 or y.ndim != 1 or dy.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency should be a scalar")
# need to make sure all unique filters are represented
u, i = np.unique(
np.concatenate([bands_fit.ravel(), unique_bands]), return_inverse=True
)
# Calculate ymeans
ymeans = np.zeros(
y.shape
) # An array of shape y, with each index given a filter specific mean
if center_data:
for band in unique_bands:
mask = bands == band
ymeans[mask] = np.average(y[mask], weights=1 / dy[mask] ** 2)
y = y - ymeans
ymeans_fit = ymeans[i[: -len(unique_bands)]]
# Theta -- Construct X and M from t and bands, using weighting
X = design_matrix(
t, bands, frequency, dy=dy, nterms_base=nterms_base, nterms_band=nterms_band
)
M = np.dot(X.T, X)
regularization = construct_regularization(
bands,
nterms_base=nterms_base,
nterms_band=nterms_band,
reg_base=reg_base,
reg_band=reg_band,
)
if regularization is not None:
diag = M.ravel(order="K")[
:: M.shape[0] + 1
] # M is being affected by operations on diag
if regularize_by_trace:
diag += diag.sum() * np.asarray(regularization)
else:
diag += np.asarray(regularization)
theta_MLE = np.linalg.solve(M, np.dot(X.T, y / dy))
# Fit to t_fit and bands_fit
X_fit = design_matrix(
t_fit.ravel(),
bands_fit.ravel(),
frequency,
dy=None,
nterms_base=nterms_base,
nterms_band=nterms_band,
)
if center_data:
y_fit = ymeans_fit + np.dot(X_fit, theta_MLE)
else:
y_fit = np.dot(X_fit, theta_MLE)
return y_fit.reshape(np.shape(t_fit))
|
22dfd91d2baf7c54bd9a40d02478b3b4c85d40fad4c253c5f67c2cceb9956a77 | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries import TimeSeries
from astropy.table import MaskedColumn
from astropy.timeseries.periodograms.lombscargle_multiband import LombScargleMultiband
from astropy.timeseries.periodograms.lombscargle import LombScargle
ALL_METHODS = LombScargleMultiband.available_methods
ALL_SB_METHODS = LombScargle.available_methods
NORMALIZATIONS = ["standard", "psd", "log", "model"]
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], nbands=3, dy=1, rseed=0):
"""Generate some data for testing"""
t_arr = []
y_arr = []
band_arr = []
dy_arr = []
for band in range(nbands):
rng = np.random.default_rng(rseed + band)
t_band = 20 * period * rng.random(N)
omega = 2 * np.pi / period
y_band = (
theta[0]
+ theta[1] * np.sin(omega * t_band)
+ theta[2] * np.cos(omega * t_band)
)
dy_band = dy * (0.5 + rng.random(N))
y_band += dy_band * rng.standard_normal(N)
t_arr += list(t_band)
y_arr += list(y_band)
dy_arr += list(dy_band)
band_arr += ["a" * (band + 1)] * N # labels bands as "a","aa","aaa",....
t_arr = np.array(t_arr)
y_arr = np.array(y_arr)
band_arr = np.array(band_arr)
dy_arr = np.array(dy_arr)
return t_arr, y_arr, band_arr, dy_arr
@pytest.fixture
def timeseries_data():
"""Generate an astropy.timeseries.TimeSeries table"""
rng = np.random.default_rng(1)
deltas = 240 * rng.random(180)
ts1 = TimeSeries(time_start="2011-01-01T00:00:00", time_delta=deltas * u.minute)
# g band fluxes
g_flux = [0] * 180 * u.mJy
g_err = [0] * 180 * u.mJy
y_g = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[0:60]), 3)
dy_g = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties
g_flux.value[0:60] = y_g
g_err.value[0:60] = dy_g
ts1["g_flux"] = MaskedColumn(g_flux, mask=[False] * 60 + [True] * 120)
ts1["g_err"] = MaskedColumn(g_err, mask=[False] * 60 + [True] * 120)
# r band fluxes
r_flux = [0] * 180 * u.mJy
r_err = [0] * 180 * u.mJy
y_r = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[60:120]), 3)
dy_r = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties
r_flux.value[60:120] = y_r
r_err.value[60:120] = dy_r
ts1["r_flux"] = MaskedColumn(r_flux, mask=[True] * 60 + [False] * 60 + [True] * 60)
ts1["r_err"] = MaskedColumn(r_err, mask=[True] * 60 + [False] * 60 + [True] * 60)
# i band fluxes
i_flux = [0] * 180 * u.mJy
i_err = [0] * 180 * u.mJy
y_i = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[120:]), 3)
dy_i = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties
i_flux.value[120:] = y_i
i_err.value[120:] = dy_i
ts1["i_flux"] = MaskedColumn(i_flux, mask=[True] * 120 + [False] * 60)
ts1["i_err"] = MaskedColumn(i_err, mask=[True] * 120 + [False] * 60)
return ts1
@pytest.mark.parametrize("minimum_frequency", [None, 1.0])
@pytest.mark.parametrize("maximum_frequency", [None, 5.0])
@pytest.mark.parametrize("nyquist_factor", [1, 10])
@pytest.mark.parametrize("samples_per_peak", [1, 5])
def test_autofrequency(
data, minimum_frequency, maximum_frequency, nyquist_factor, samples_per_peak
):
t, y, band, dy = data
baseline = t.max() - t.min()
freq = LombScargleMultiband(t, y, band, dy).autofrequency(
samples_per_peak, nyquist_factor, minimum_frequency, maximum_frequency
)
df = freq[1] - freq[0]
# Check sample spacing
assert_allclose(df, 1.0 / baseline / samples_per_peak)
# Check minimum frequency
if minimum_frequency is None:
assert_allclose(freq[0], 0.5 * df)
else:
assert_allclose(freq[0], minimum_frequency)
if maximum_frequency is None:
avg_nyquist = 0.5 * len(t) / baseline
assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5 * df)
else:
assert_allclose(freq[-1], maximum_frequency, atol=0.5 * df)
@pytest.mark.parametrize("method", ALL_METHODS)
@pytest.mark.parametrize("nterms_base", [1, 3])
@pytest.mark.parametrize("nterms_band", [0, 1])
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_all_methods(
data,
method,
nterms_base,
nterms_band,
center_data,
errors,
with_units,
normalization,
):
t, y, band, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
kwds = {}
ls = LombScargleMultiband(
t,
y,
band,
dy,
nterms_base=nterms_base,
nterms_band=nterms_band,
center_data=center_data,
normalization=normalization,
)
P_expected = ls.power(frequency, method=method)
P_method = ls.power(frequency, method=method, **kwds)
freq_maxpower = frequency[np.argmax(P_method)]
if with_units:
assert P_method.unit == u.dimensionless_unscaled
assert np.isclose(
freq_maxpower.value, 1.0, rtol=1e-2
) # period=1 check peak frequency
else:
assert not hasattr(P_method, "unit")
assert np.isclose(
freq_maxpower, 1.0, rtol=1e-2
) # period=1, check peak frequency
assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize("method", ALL_METHODS)
@pytest.mark.parametrize("nterms_base", [1, 3])
@pytest.mark.parametrize("nterms_band", [0, 1])
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("with_errors", [True, False])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_integer_inputs(
data, method, nterms_base, nterms_band, center_data, with_errors, normalization
):
if method == "scipy" and with_errors:
return
t, y, band, dy = data
t = np.floor(100 * t)
t_int = t.astype(int)
y = np.floor(100 * y)
y_int = y.astype(int)
dy = np.floor(100 * dy)
dy_int = dy.astype("int32")
frequency = 1e-2 * (0.8 + 0.01 * np.arange(40))
if not with_errors:
dy = None
dy_int = None
kwds = dict(center_data=center_data, normalization=normalization)
P_float = LombScargleMultiband(t, y, band, dy, **kwds).power(
frequency, method=method
)
P_int = LombScargleMultiband(t_int, y_int, band, dy_int, **kwds).power(
frequency, method=method
)
assert_allclose(P_float, P_int)
@pytest.mark.parametrize("method", ["flexible"])
@pytest.mark.parametrize("nterms_base", [0, 1, 2, 3])
@pytest.mark.parametrize("nterms_band", [0, 1, 2])
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("normalization", NORMALIZATIONS)
def test_nterms_methods(
method, nterms_base, nterms_band, center_data, errors, normalization, data
):
t, y, band, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargleMultiband(
t,
y,
band,
dy,
center_data=center_data,
nterms_base=nterms_base,
nterms_band=nterms_band,
normalization=normalization,
)
if (nterms_base == 0) and (nterms_band == 0):
with pytest.raises(ValueError) as err:
ls.power(frequency, method=method)
assert "nterms_base" in str(err.value)
else:
P_expected = ls.power(frequency)
# don't use fast fft approximations here
kwds = {}
if "fast" in method:
kwds["method_kwds"] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
assert_allclose(P_expected, P_method, rtol=1e-7, atol=1e-25)
@pytest.mark.parametrize("method", ALL_METHODS)
@pytest.mark.parametrize("shape", [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
t, y, band, dy = data
freq = np.asarray(np.zeros(shape))
freq.flat = np.arange(1, freq.size + 1)
PLS = LombScargleMultiband(t, y, band).power(freq, method=method)
assert PLS.shape == shape
@pytest.mark.parametrize("method", ALL_METHODS)
def test_errors_on_unit_mismatch(method, data):
t, y, band, dy = data
t = t * u.second
y = y * u.mag
frequency = np.linspace(0.5, 1.5, 10)
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargleMultiband(t, y, band).power(frequency, method=method)
assert str(err.value).startswith("Units of frequency not equivalent")
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargleMultiband(t, y, band, dy).power(frequency / t.unit)
assert str(err.value).startswith("Units of dy not equivalent")
@pytest.mark.parametrize("method", ALL_METHODS)
@pytest.mark.parametrize("with_error", [True, False])
def test_unit_conversions(data, method, with_error):
t, y, band, dy = data
t_day = t * u.day
t_hour = u.Quantity(t_day, "hour")
y_meter = y * u.meter
y_millimeter = u.Quantity(y_meter, "millimeter")
# sanity check on inputs
assert_quantity_allclose(t_day, t_hour)
assert_quantity_allclose(y_meter, y_millimeter)
if with_error:
dy = dy * u.meter
else:
dy = None
freq_day, P1 = LombScargleMultiband(t_day, y_meter, band, dy).autopower(
method=method
)
freq_hour, P2 = LombScargleMultiband(t_hour, y_millimeter, band, dy).autopower(
method=method
)
# Check units of frequency
assert freq_day.unit == 1.0 / u.day
assert freq_hour.unit == 1.0 / u.hour
# Check that results match
assert_quantity_allclose(freq_day, freq_hour)
assert_quantity_allclose(P1, P2)
# Check that switching frequency units doesn't change things
P3 = LombScargleMultiband(t_day, y_meter, band, dy).power(freq_hour, method=method)
P4 = LombScargleMultiband(t_hour, y_meter, band, dy).power(freq_day, method=method)
assert_quantity_allclose(P3, P4)
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("freq", [1.0, 2.0])
def test_model(with_units, freq):
rand = np.random.default_rng(0)
t = 10 * rand.random(120)
band = 40 * ["a"] + 40 * ["b"] + 40 * ["c"]
params = 10 * rand.random(3)
y = np.zeros_like(t)
y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
if with_units:
t = t * u.day
y = y * u.mag
freq = freq / u.day
ls = LombScargleMultiband(t, y, band, center_data=False)
y_fit = ls.model(t, freq, bands_fit=None)
assert_quantity_allclose(y_fit[0][0:40], y[0:40])
assert_quantity_allclose(y_fit[1][40:80], y[40:80])
assert_quantity_allclose(y_fit[2][80:], y[80:])
@pytest.mark.parametrize("t_unit", [u.second, u.day])
@pytest.mark.parametrize("frequency_unit", [u.Hz, 1.0 / u.second])
@pytest.mark.parametrize("y_unit", [u.mag, u.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
t, y, band, dy = data
t_fit = t[:5]
frequency = 1.0
t = t * t_unit
t_fit = t_fit * t_unit
y = y * y_unit
dy = dy * y_unit
frequency = frequency * frequency_unit
ls = LombScargleMultiband(t, y, band, dy)
y_fit = ls.model(t_fit, frequency)
assert y_fit.unit == y_unit
def test_model_units_mismatch(data):
t, y, band, dy = data
frequency = 1.0
t_fit = t[:5]
t = t * u.second
t_fit = t_fit * u.second
y = y * u.mag
frequency = 1.0 / t.unit
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargleMultiband(t, y, band).model(t_fit, frequency=1.0)
assert str(err.value).startswith("Units of frequency not equivalent")
# this should fail because t and t_fit units do not match
with pytest.raises(ValueError) as err:
LombScargleMultiband(t, y, band).model([1, 2], frequency)
assert str(err.value).startswith("Units of t not equivalent")
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargleMultiband(t, y, band, dy).model(t_fit, frequency)
assert str(err.value).startswith("Units of dy not equivalent")
def test_autopower(data):
t, y, band, dy = data
ls = LombScargleMultiband(t, y, band, dy)
kwargs = dict(
samples_per_peak=6,
nyquist_factor=2,
minimum_frequency=2,
maximum_frequency=None,
)
freq1 = ls.autofrequency(**kwargs)
power1 = ls.power(freq1)
freq2, power2 = ls.autopower(**kwargs)
assert_allclose(freq1, freq2)
assert_allclose(power1, power2)
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("center_data", [True, False])
@pytest.mark.parametrize("nterms_base", [0, 1, 2])
@pytest.mark.parametrize("nterms_band", [0, 1])
def test_model_parameters(
data, nterms_base, nterms_band, center_data, errors, with_units
):
if (nterms_base == 0) and (nterms_band == 0):
return
t, y, band, dy = data
frequency = 1.5
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargleMultiband(
t, y, band, dy, nterms_base=nterms_base, center_data=center_data
)
tfit = np.linspace(0, 20, 10)
if with_units:
tfit = tfit * u.day
model = ls.model(tfit, frequency)
params = ls.model_parameters(frequency)
design = ls.design_matrix(frequency, t_fit=tfit, bands_fit=None)
offset = ls.offset(t_fit=tfit)
if nterms_band == 0:
nterms_band = 1
assert len(params) == 1 + 2 * nterms_base + len(np.unique(band)) * (
2 * nterms_band + 1
)
from_funcs = offset + design.dot(params)
from_funcs = from_funcs.reshape((len(np.unique(band)), len(tfit)))
assert_quantity_allclose(from_funcs, model)
@pytest.mark.parametrize("timedelta", [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, band, dy = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.0
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same
start = Time("2019-05-04T12:34:56")
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of LombScargle, one with absolute and one
# with relative times.
ls1 = LombScargleMultiband(t, y, band, dy)
ls2 = LombScargleMultiband(trel, y, band, dy)
kwargs = dict(
samples_per_peak=6,
nyquist_factor=2,
minimum_frequency=2 / u.day,
maximum_frequency=None,
)
freq1 = ls1.autofrequency(**kwargs)
freq2 = ls2.autofrequency(**kwargs)
assert_quantity_allclose(freq1, freq2)
power1 = ls1.power(freq1)
power2 = ls2.power(freq2)
assert_quantity_allclose(power1, power2)
freq1, power1 = ls1.autopower(**kwargs)
freq2, power2 = ls2.autopower(**kwargs)
assert_quantity_allclose(freq1, freq2)
assert_quantity_allclose(power1, power2)
model1 = ls1.model(t, 2 / u.day)
model2 = ls2.model(trel, 2 / u.day)
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
ls1.model(trel, 2 / u.day)
assert exc.value.args[0] == (
"t was provided as a relative time but the "
"LombScargle class was initialized with "
"absolute times."
)
with pytest.raises(TypeError) as exc:
ls2.model(t, 2 / u.day)
assert exc.value.args[0] == (
"t was provided as an absolute time but the "
"LombScargle class was initialized with "
"relative times."
)
# Check design matrix
design1 = ls1.design_matrix(2 / u.day, t_fit=t)
design2 = ls2.design_matrix(2 / u.day, t_fit=trel)
assert_quantity_allclose(design1, design2)
# Check design matrix validation
with pytest.raises(TypeError) as exc:
ls1.design_matrix(2 / u.day, t_fit=trel)
assert exc.value.args[0] == (
"t was provided as a relative time but the "
"LombScargle class was initialized with "
"absolute times."
)
with pytest.raises(TypeError) as exc:
ls2.design_matrix(2 / u.day, t_fit=t)
assert exc.value.args[0] == (
"t was provided as an absolute time but the "
"LombScargle class was initialized with "
"relative times."
)
@pytest.mark.parametrize("uncertainty_column", [None, ["g_err", "r_err", "i_err"]])
@pytest.mark.parametrize("band_labels", [None, ["g", "r", "i"]])
def test_from_timeseries(timeseries_data, uncertainty_column, band_labels):
ts = timeseries_data
    ls = LombScargleMultiband.from_timeseries(
        ts,
        signal_column=["g_flux", "r_flux", "i_flux"],
        uncertainty_column=uncertainty_column,
        band_labels=band_labels,
    )
frequency, power = ls.autopower()
freq_maxpower = frequency[np.argmax(power)]
assert_allclose(freq_maxpower.value, 5, rtol=0.01)
@pytest.mark.parametrize("errors", ["none", "partial", "full"])
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("sb_method", ALL_SB_METHODS)
def test_single_band_equivalence(data, with_units, errors, sb_method):
fit_mean = True
if sb_method == "scipy":
fit_mean = False
t, y, band, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
# select just one band
a_mask = band == "a"
t = t[a_mask]
y = y[a_mask]
band = band[a_mask]
dy = dy[a_mask]
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == "none":
dy = None
elif errors == "partial":
dy = dy[0]
elif errors == "full":
if sb_method == "scipy":
return
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
lsmb = LombScargleMultiband(t, y, band, dy, fit_mean=fit_mean)
P_lsmb = lsmb.power(frequency, method="fast", sb_method=sb_method)
ls = LombScargle(t, y, dy, fit_mean=fit_mean)
P_ls = ls.power(frequency, method=sb_method)
# test to see if lombscargle multiband and lombscargle are equivalent
assert_quantity_allclose(P_lsmb, P_ls)
|
0f2e8588336b071357821df737007f48ab9ef637364f4d7469b916e1f47303bf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for `astropy.cosmology`."""
from .comparison import cosmology_equal
from .optimize import z_at_value
__all__ = ["z_at_value", "cosmology_equal"]
|
9ad1ee982158f361341ed000f0566aa1f6012ad734d093960ce96b41680b923e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["Parameter"]
from ._core import Parameter
|
d28f308b468b49bf7ddfa100074ef3d839f60f61a52a3ddb3e1b4b540178ff8f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from typing import Any, Callable
import astropy.units as u
__all__ = []
FValidateCallable = Callable[[object, object, Any], Any]
_REGISTRY_FVALIDATORS: dict[str, FValidateCallable] = {}
def _register_validator(key, fvalidate=None):
"""Decorator to register a new kind of validator function.
Parameters
----------
key : str
fvalidate : callable[[object, object, Any], Any] or None, optional
Value validation function.
Returns
-------
``validator`` or callable[``validator``]
If ``fvalidate`` is not None, it is returned unchanged after registration.
If ``fvalidate`` is None, a function that takes and registers a validator
is returned instead, which allows ``_register_validator`` to be used as a
decorator.
"""
if key in _REGISTRY_FVALIDATORS:
raise KeyError(f"validator {key!r} already registered with Parameter.")
# fvalidate directly passed
if fvalidate is not None:
_REGISTRY_FVALIDATORS[key] = fvalidate
return fvalidate
# for use as a decorator
def register(fvalidate):
"""Register validator function.
Parameters
----------
fvalidate : callable[[object, object, Any], Any]
Validation function.
Returns
-------
``validator``
"""
_REGISTRY_FVALIDATORS[key] = fvalidate
return fvalidate
return register
# ======================================================================
@_register_validator("default")
def _validate_with_unit(cosmology, param, value):
"""
Default Parameter value validator.
Adds/converts units if Parameter has a unit.
"""
if param.unit is not None:
with u.add_enabled_equivalencies(param.equivalencies):
value = u.Quantity(value, param.unit)
return value
@_register_validator("float")
def _validate_to_float(cosmology, param, value):
"""Parameter value validator with units, and converted to float."""
value = _validate_with_unit(cosmology, param, value)
return float(value)
@_register_validator("scalar")
def _validate_to_scalar(cosmology, param, value):
""""""
value = _validate_with_unit(cosmology, param, value)
if not value.isscalar:
raise ValueError(f"{param.name} is a non-scalar quantity")
return value
@_register_validator("non-negative")
def _validate_non_negative(cosmology, param, value):
"""Parameter value validator where value is a positive float."""
value = _validate_to_float(cosmology, param, value)
if value < 0.0:
raise ValueError(f"{param.name} cannot be negative.")
return value
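# ----------------------------------------------------------------------
# Illustrative sketch (not part of the registry above; the "unit-interval"
# key is made up for this example): how the decorator form of
# ``_register_validator`` could be used to add a custom validator. The
# example is guarded and cleans up after itself, so importing this module
# is unaffected.
if __name__ == "__main__":
    @_register_validator("unit-interval")
    def _validate_unit_interval(cosmology, param, value):
        """Parameter value validator for floats in the closed interval [0, 1]."""
        value = _validate_to_float(cosmology, param, value)
        if not 0.0 <= value <= 1.0:
            raise ValueError(f"{param.name} must lie in [0, 1].")
        return value
    assert "unit-interval" in _REGISTRY_FVALIDATORS
    del _REGISTRY_FVALIDATORS["unit-interval"]  # remove the example key again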
|
9bd6ebc468fc7fdda026616db2e3f31dfd399c21d5a76241ee61799f66d2d118 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import astropy.units as u
from ._converter import _REGISTRY_FVALIDATORS, _register_validator
__all__ = []
class Parameter:
r"""Cosmological parameter (descriptor).
Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass.
Parameters
----------
derived : bool (optional, keyword-only)
Whether the Parameter is 'derived', default `False`.
Derived parameters behave similarly to normal parameters, but are not
sorted by the |Cosmology| signature (they are generally not in the
signature) and are not included in all methods. For reference, see
``Ode0`` in ``FlatFLRWMixin``, which removes :math:`\Omega_{de,0}` as an
independent parameter (:math:`\Omega_{de,0} \equiv 1 - \Omega_{tot}`).
unit : unit-like or None (optional, keyword-only)
The `~astropy.units.Unit` for the Parameter. If None (default) no
unit is assumed.
equivalencies : `~astropy.units.Equivalency` or sequence thereof
Unit equivalencies for this Parameter.
fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only)
Function to validate the Parameter value from instances of the
cosmology class. If "default", uses default validator to assign units
(with equivalencies), if Parameter has units.
For other valid string options, see the keys of
``astropy.cosmology.parameter._converter._REGISTRY_FVALIDATORS``.
``fvalidate`` can also be set through a decorator with
:meth:`~astropy.cosmology.Parameter.validator`.
doc : str or None (optional, keyword-only)
Parameter description.
Examples
--------
For worked examples see :class:`~astropy.cosmology.FLRW`.
"""
def __init__(
self,
*,
derived=False,
unit=None,
equivalencies=[],
fvalidate="default",
doc=None,
):
# attribute name on container cosmology class.
# really set in __set_name__, but if Parameter is not init'ed as a
# descriptor this ensures that the attributes exist.
self._attr_name = self._attr_name_private = None
self._derived = derived
self.__doc__ = doc
# units stuff
self._unit = u.Unit(unit) if unit is not None else None
self._equivalencies = equivalencies
# Parse registered `fvalidate`
self._fvalidate_in = fvalidate # Always store input fvalidate.
if callable(fvalidate):
pass
elif fvalidate in _REGISTRY_FVALIDATORS:
fvalidate = _REGISTRY_FVALIDATORS[fvalidate]
elif isinstance(fvalidate, str):
raise ValueError(
f"`fvalidate`, if str, must be in {_REGISTRY_FVALIDATORS.keys()}"
)
else:
raise TypeError(
f"`fvalidate` must be a function or {_REGISTRY_FVALIDATORS.keys()}"
)
self._fvalidate = fvalidate
def __set_name__(self, cosmo_cls, name):
# attribute name on container cosmology class
self._attr_name = name
self._attr_name_private = "_" + name
@property
def name(self):
"""Parameter name."""
return self._attr_name
@property
def unit(self):
"""Parameter unit."""
return self._unit
@property
def equivalencies(self):
"""Equivalencies used when initializing Parameter."""
return self._equivalencies
@property
def derived(self):
"""Whether the Parameter is derived; true parameters are not."""
return self._derived
# -------------------------------------------
# descriptor and property-like methods
def __get__(self, cosmology, cosmo_cls=None):
# Get from class
if cosmology is None:
return self
# Get from instance
return getattr(cosmology, self._attr_name_private)
def __set__(self, cosmology, value):
"""Allows attribute setting once. Raises AttributeError subsequently."""
# Raise error if setting 2nd time.
if hasattr(cosmology, self._attr_name_private):
raise AttributeError(f"can't set attribute {self._attr_name} again")
# Validate value, generally setting units if present
value = self.validate(cosmology, copy.deepcopy(value))
# Make the value read-only, if ndarray-like
if hasattr(value, "setflags"):
value.setflags(write=False)
# Set the value on the cosmology
setattr(cosmology, self._attr_name_private, value)
# -------------------------------------------
# validate value
@property
def fvalidate(self):
"""Function to validate a potential value of this Parameter."""
return self._fvalidate
def validator(self, fvalidate):
"""Make new Parameter with custom ``fvalidate``.
Note: ``Parameter.validator`` must be the top-most descriptor decorator.
Parameters
----------
fvalidate : callable[[type, type, Any], Any]
Returns
-------
`~astropy.cosmology.Parameter`
Copy of this Parameter but with custom ``fvalidate``.
"""
return self.clone(fvalidate=fvalidate)
def validate(self, cosmology, value):
"""Run the validator on this Parameter.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` instance
value : Any
The object to validate.
Returns
-------
Any
The output of calling ``fvalidate(cosmology, self, value)``
(yes, that parameter order).
"""
return self.fvalidate(cosmology, self, value)
@staticmethod
def register_validator(key, fvalidate=None):
"""Decorator to register a new kind of validator function.
Parameters
----------
key : str
fvalidate : callable[[object, object, Any], Any] or None, optional
Value validation function.
Returns
-------
``validator`` or callable[``validator``]
If ``fvalidate`` is not None, it is returned unchanged after registration.
If ``fvalidate`` is None, a function that takes and registers a validator
is returned instead, which allows ``register_validator`` to be used as a
decorator.
"""
return _register_validator(key, fvalidate=fvalidate)
# -------------------------------------------
def _get_init_arguments(self, processed=False):
"""Initialization arguments.
Parameters
----------
processed : bool
Whether to more closely reproduce the input arguments (`False`,
default) or the processed arguments (`True`). The former is better
for string representations and round-tripping with ``eval(repr())``.
Returns
-------
dict[str, Any]
"""
# The keys are added in this order because `repr` prints them in order.
kw = {
"derived": self.derived,
"unit": self.unit,
"equivalencies": self.equivalencies,
# Validator is always turned into a function, but for ``repr`` it's nice
# to know if it was originally a string.
"fvalidate": self.fvalidate if processed else self._fvalidate_in,
"doc": self.__doc__,
}
return kw
def clone(self, **kw):
"""Clone this `Parameter`, changing any constructor argument.
Parameters
----------
**kw
Passed to constructor. The current values, e.g. ``fvalidate``, are
used as the default values, so an empty ``**kw`` is an exact copy.
Examples
--------
>>> p = Parameter()
>>> p
Parameter(derived=False, unit=None, equivalencies=[],
fvalidate='default', doc=None)
>>> p.clone(unit="km")
Parameter(derived=False, unit=Unit("km"), equivalencies=[],
fvalidate='default', doc=None)
"""
# Start with defaults, update from kw.
kwargs = {**self._get_init_arguments(), **kw}
# All initialization failures, like incorrect input, are handled by __init__
cloned = type(self)(**kwargs)
# Transfer over the __set_name__ stuff. If `clone` is used to make a
# new descriptor, __set_name__ will be called again, overwriting this.
cloned._attr_name = self._attr_name
cloned._attr_name_private = self._attr_name_private
return cloned
def __eq__(self, other):
"""Check Parameter equality. Only equal to other Parameter objects.
Returns
-------
bool or NotImplemented
`True` if equal, `False` if not, or `NotImplemented` if ``other`` is not a
Parameter. Returning `NotImplemented` allows ``other`` to be checked for
equality with ``other.__eq__``.
Examples
--------
>>> p1, p2 = Parameter(unit="km"), Parameter(unit="km")
>>> p1 == p2
True
>>> p3 = Parameter(unit="km / s")
>>> p3 == p1
False
>>> p1 != 2
True
"""
if not isinstance(other, Parameter):
return NotImplemented
# Check equality on all `_init_arguments` & `name`.
# Need to compare the processed arguments because the inputs are many-
# to-one, e.g. `fvalidate` can be a string or the equivalent function.
return (self._get_init_arguments(True) == other._get_init_arguments(True)) and (
self.name == other.name
)
def __repr__(self):
"""String representation.
``eval(repr())`` should work, provided that contents like ``fvalidate``
can themselves be round-tripped.
"""
return "Parameter({})".format(
", ".join(f"{k}={v!r}" for k, v in self._get_init_arguments().items())
)
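# ----------------------------------------------------------------------
# Illustrative sketch (hypothetical; ``_DemoHost`` is made up and is not part
# of astropy): Parameter is designed for Cosmology subclasses, but its basic
# descriptor behaviour -- naming via __set_name__, validation on assignment,
# and set-once semantics -- can be seen on any host class. Guarded so that
# importing this module is unaffected.
if __name__ == "__main__":
    class _DemoHost:
        H0 = Parameter(unit="km/(s Mpc)", doc="Hubble constant at z=0.")
        def __init__(self, H0):
            self.H0 = H0  # routed through Parameter.__set__, which validates
    demo = _DemoHost(70)
    print(demo.H0)  # a Quantity in km / (Mpc s); units added by the default validator
    print(_DemoHost.H0.name)  # 'H0', assigned by __set_name__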
|
d960db124989d14f657a0024e345c312e54296d30a5d91f7f08238832aac3070 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from .test_core import _zarr, invalid_zs, valid_zs
def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray)
# -------------------------------------------------------------------
class Test_aszarr:
@pytest.mark.parametrize(
"z, expect",
list(
zip(
valid_zs,
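                # expected outputs: scalar zs pass through; Quantity and
                # ``u.one`` values are unit-stripped to floats; all the
                # array-like inputs map to ``_zarr``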
[0, 1, 1100, np.float64(3300), 2.0, 3.0, _zarr, _zarr, _zarr, _zarr],
)
),
)
def test_valid(self, z, expect):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
got = aszarr(z)
assert np.array_equal(got, expect)
@pytest.mark.parametrize("z, exc", invalid_zs)
def test_invalid(self, z, exc):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
with pytest.raises(exc):
aszarr(z)
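# ----------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): what ``aszarr`` is expected
# to do with the kinds of inputs exercised above. The commented values are
# indicative only.
if __name__ == "__main__":
    import astropy.cosmology.units as cu
    print(aszarr(1100))  # scalar numbers pass through unchanged
    print(aszarr([0.0, 0.5, 1.0]))  # sequences become ndarrays
    print(aszarr([0.0, 1.0] * cu.redshift))  # redshift Quantities are unit-stripped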
|
5dbd19d6892b494618e47c5d2638c877008442d5191b2a9c3160daa67fc4a1bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.parameter._converter import (
_REGISTRY_FVALIDATORS,
_validate_to_float,
_validate_with_unit,
)
##############################################################################
# TESTS
##############################################################################
def test_registry_validators():
"""Test :class:`astropy.cosmology.Parameter` attributes on class."""
# _registry_validators
assert isinstance(_REGISTRY_FVALIDATORS, dict)
assert all(isinstance(k, str) for k in _REGISTRY_FVALIDATORS.keys())
assert all(callable(v) for v in _REGISTRY_FVALIDATORS.values())
class ParameterTestMixin:
"""Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.
:class:`astropy.cosmology.Parameter` is a descriptor and this test suite
tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
"""
@pytest.fixture
def parameter(self, cosmo_cls):
"""Cosmological Parameters"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())
@pytest.fixture
def all_parameter(self, cosmo_cls):
"""Cosmological All Parameter instances"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())
# ===============================================================
# Method Tests
def test_Parameter_init(self):
"""Test :class:`astropy.cosmology.Parameter` instantiation."""
# defaults
parameter = Parameter()
assert parameter.fvalidate is _validate_with_unit
assert parameter.unit is None
assert parameter.equivalencies == []
assert parameter.derived is False
assert parameter.name is None
# setting all kwargs
parameter = Parameter(
fvalidate="float",
doc="DOCSTRING",
unit="km",
equivalencies=[u.mass_energy()],
derived=True,
)
assert parameter.fvalidate is _validate_to_float
assert parameter.unit is u.km
assert parameter.equivalencies == [u.mass_energy()]
assert parameter.derived is True
def test_Parameter_instance_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
assert hasattr(all_parameter, "__doc__")
# Parameter
assert hasattr(all_parameter, "_unit")
assert hasattr(all_parameter, "_equivalencies")
assert hasattr(all_parameter, "_derived")
# __set_name__
assert hasattr(all_parameter, "_attr_name")
assert hasattr(all_parameter, "_attr_name_private")
def test_Parameter_fvalidate(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
def test_Parameter_name(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
assert hasattr(all_parameter, "name")
assert isinstance(all_parameter.name, str)
assert all_parameter.name is all_parameter._attr_name
def test_Parameter_unit(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
assert hasattr(all_parameter, "unit")
assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
assert all_parameter.unit is all_parameter._unit
def test_Parameter_equivalencies(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
assert hasattr(all_parameter, "equivalencies")
assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
assert all_parameter.equivalencies is all_parameter._equivalencies
def test_Parameter_derived(self, cosmo_cls, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
assert hasattr(all_parameter, "derived")
assert isinstance(all_parameter.derived, bool)
assert all_parameter.derived is all_parameter._derived
# test value
if all_parameter.name in cosmo_cls.__parameters__:
assert all_parameter.derived is False
else:
assert all_parameter.derived is True
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__get__`."""
# from class
parameter = getattr(cosmo_cls, all_parameter.name)
assert isinstance(parameter, Parameter)
assert parameter is all_parameter
# from instance
parameter = getattr(cosmo, all_parameter.name)
assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))
def test_Parameter_descriptor_set(self, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__set__`."""
# test it's already set
assert hasattr(cosmo, all_parameter._attr_name_private)
# and raises an error if set again
with pytest.raises(AttributeError, match="can't set attribute"):
setattr(cosmo, all_parameter._attr_name, None)
# -------------------------------------------
# validate value
# tested later.
# ===============================================================
# Usage Tests
def test_Parameter_listed(self, cosmo_cls, all_parameter):
"""Test each `astropy.cosmology.Parameter` attached to Cosmology."""
# just double check that each entry is a Parameter
assert isinstance(all_parameter, Parameter)
# the reverse: check that if it is a Parameter, it's listed.
# note have to check the more inclusive ``__all_parameters__``
assert all_parameter.name in cosmo_cls.__all_parameters__
if not all_parameter.derived:
assert all_parameter.name in cosmo_cls.__parameters__
def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
"""Test `astropy.cosmology.Parameter`-related on Cosmology."""
# establish has expected attribute
assert hasattr(cosmo_cls, "__parameters__")
assert hasattr(cosmo_cls, "__all_parameters__")
def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
"""Cosmology Parameter not unique to class when subclass defined."""
# define subclass to show param is same
class ExampleBase(cosmo_cls):
param = Parameter()
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
assert Example.__parameters__ == ExampleBase.__parameters__
def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
"""Test parameters are reordered."""
class Example(cosmo_cls):
param = Parameter()
def __init__(self, param, *, name=None, meta=None):
pass # never actually initialized
# param should be 1st, all other parameters next
assert Example.__parameters__[0] == "param"
# Check the other parameters are as expected.
# only run this test if "param" is not already on the cosmology
if cosmo_cls.__parameters__[0] != "param":
assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)
def test_make_from_Parameter(self, cosmo_cls, clean_registry):
"""Test the parameter creation process. Uses ``__set__``."""
class Example(cosmo_cls):
param = Parameter(unit=u.eV, equivalencies=u.mass_energy())
def __init__(self, param, *, name=None, meta=None):
self.param = param
@property
def is_flat(self):
return super().is_flat()
assert Example(1).param == 1 * u.eV
assert Example(1 * u.eV).param == 1 * u.eV
assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
class Example1(Cosmology):
param = Parameter(
doc="Description of example parameter.",
unit=u.m,
equivalencies=u.mass_energy(),
)
def __init__(self, param=15):
self.param = param
@property
def is_flat(self):
return super().is_flat()
# with validator
class Example2(Example1):
def __init__(self, param=15 * u.m):
self.param = param
@Example1.param.validator
def param(self, param, value):
return value.to(u.km)
# attributes
self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
for cls in self.classes.values():
_COSMOLOGY_CLASSES.pop(cls.__qualname__)
@pytest.fixture(scope="class", params=["Example1", "Example2"])
def cosmo_cls(self, request):
"""Cosmology class."""
return self.classes[request.param]
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""Cosmology instance"""
return cosmo_cls()
@pytest.fixture(scope="class")
def param(self, cosmo_cls):
"""Get Parameter 'param' from cosmology class."""
return cosmo_cls.param
@pytest.fixture(scope="class")
def param_cls(self, cosmo_cls):
"""Get Parameter class from cosmology class."""
return cosmo_cls.param.__class__
# ==============================================================
def test_Parameter_instance_attributes(self, param):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
super().test_Parameter_instance_attributes(param)
# property
assert param.__doc__ == "Description of example parameter."
# custom from init
assert param._unit == u.m
assert param._equivalencies == u.mass_energy()
assert param._derived == np.False_
# custom from set_name
assert param._attr_name == "param"
assert param._attr_name_private == "_param"
def test_Parameter_fvalidate(self, cosmo, param):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
super().test_Parameter_fvalidate(param)
value = param.fvalidate(cosmo, param, 1000 * u.m)
assert value == 1 * u.km
def test_Parameter_name(self, param):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
super().test_Parameter_name(param)
assert param.name == "param"
def test_Parameter_unit(self, param):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
super().test_Parameter_unit(param)
assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
super().test_Parameter_equivalencies(param)
assert param.equivalencies == u.mass_energy()
def test_Parameter_derived(self, cosmo_cls, param):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
super().test_Parameter_derived(cosmo_cls, param)
assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.__get__`."""
super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
# from instance
value = getattr(cosmo, param.name)
assert value == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.validator`."""
for k in _REGISTRY_FVALIDATORS:
newparam = param.validator(k)
assert newparam.fvalidate == _REGISTRY_FVALIDATORS[k]
# error for non-registered str
with pytest.raises(ValueError, match="`fvalidate`, if str"):
Parameter(fvalidate="NOT REGISTERED")
# error if wrong type
with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.validate`."""
value = param.validate(cosmo, 1000 * u.m)
# whether has custom validator
if param.fvalidate is _REGISTRY_FVALIDATORS["default"]:
assert value.unit == u.m
assert value.value == 1000
else:
assert value.unit == u.km
assert value.value == 1
def test_Parameter_register_validator(self, param_cls):
"""Test :meth:`astropy.cosmology.Parameter.register_validator`."""
# already registered
with pytest.raises(KeyError, match="validator 'default' already"):
param_cls.register_validator("default", None)
# validator not None
def notnonefunc(x):
return x
try:
validator = param_cls.register_validator("newvalidator", notnonefunc)
assert validator is notnonefunc
finally:
_REGISTRY_FVALIDATORS.pop("newvalidator", None)
# used as decorator
try:
@param_cls.register_validator("newvalidator")
def func(cosmology, param, value):
return value
assert _REGISTRY_FVALIDATORS["newvalidator"] is func
finally:
_REGISTRY_FVALIDATORS.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
"""Test :meth:`astropy.cosmology.Parameter.clone`."""
# this implicitly relies on `__eq__` testing properly. Which is tested.
# basic test that nothing changes
assert param.clone() == param
assert param.clone() is not param # but it's not a 'singleton'
# passing kwargs will change stuff
newparam = param.clone(unit="km/(yr sr)")
assert newparam.unit == u.km / u.yr / u.sr
assert param.unit != u.km / u.yr / u.sr # original is unchanged
# expected failure for not-an-argument
with pytest.raises(TypeError):
param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
"""
Test Parameter equality.
Determined from the processed initialization args (including defaults).
"""
p1 = Parameter(unit="km / (s Mpc)")
p2 = Parameter(unit="km / (s Mpc)")
assert p1 == p2
# not equal parameters
p3 = Parameter(unit="km / s")
assert p3 != p1
# misc
assert p1 != 2 # show doesn't error
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in (
"derived=False",
'unit=Unit("m")',
'equivalencies=[(Unit("kg"), Unit("J")',
"doc='Description of example parameter.'",
):
assert subs in r, subs
# `fvalidate` is a little trickier b/c one of them is custom!
if param.fvalidate in _REGISTRY_FVALIDATORS.values(): # not custom
assert "fvalidate='default'" in r
else:
assert "fvalidate=<" in r # Some function, don't care about details.
def test_Parameter_repr_roundtrip(self, param):
"""Test ``eval(repr(Parameter))`` can round trip to ``Parameter``."""
P = Parameter(doc="A description of this parameter.", derived=True)
NP = eval(repr(P)) # Evaluate string representation back into a param.
assert P == NP
# ==============================================================
def test_Parameter_doesnt_change_with_generic_class(self):
"""Descriptors are initialized once and not updated on subclasses."""
class ExampleBase:
def __init__(self, param=15):
self._param = param
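            # Mimic Cosmology's class machinery: capture __init__'s signature
            # with the 'self' parameter stripped, as ``_init_signature``.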
sig = inspect.signature(__init__)
_init_signature = sig.replace(parameters=list(sig.parameters.values())[1:])
param = Parameter(doc="example parameter")
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls):
"""Cosmology reinitializes all descriptors when a subclass is defined."""
# define subclass to show param is same
class Example(cosmo_cls):
pass
assert Example.param is cosmo_cls.param
# unregister
_COSMOLOGY_CLASSES.pop(Example.__qualname__)
assert Example.__qualname__ not in _COSMOLOGY_CLASSES
|
e26fb18e6f7a39fa162864f54959de6c8194e16e9d527e5386c81071f48dfc92 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import pytest
from astropy import cosmology
from astropy.cosmology import Cosmology, w0wzCDM
from astropy.cosmology._io.tests import (
test_cosmology,
test_ecsv,
test_html,
test_json,
test_latex,
test_mapping,
test_model,
test_row,
test_table,
test_yaml,
)
from astropy.cosmology.connect import readwrite_registry
from astropy.table import QTable, Row
from astropy.utils.compat.optional_deps import HAS_BS4
###############################################################################
# SETUP
cosmo_instances = cosmology.realizations.available
# Collect the registered read/write formats.
# (format, supports_metadata, has_all_required_dependencies)
readwrite_formats = {
("ascii.ecsv", True, True),
("ascii.html", False, HAS_BS4),
("ascii.latex", False, True),
("json", True, True),
("latex", False, True),
}
# Collect all the registered to/from formats. Unfortunately this is NOT
# automatic since the output format class is not stored on the registry.
# (format, data type)
tofrom_formats = [
("mapping", dict),
("yaml", str),
("astropy.cosmology", Cosmology),
("astropy.row", Row),
("astropy.table", QTable),
]
###############################################################################
class ReadWriteTestMixin(
test_ecsv.ReadWriteECSVTestMixin,
test_html.ReadWriteHTMLTestMixin,
test_json.ReadWriteJSONTestMixin,
test_latex.WriteLATEXTestMixin,
):
"""
Tests for a CosmologyRead/Write on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
def test_readwrite_complete_info(self, cosmo, tmp_path, format, metaio, has_deps):
"""
Test writing from an instance and reading from the base class.
This requires full information.
The round-tripped metadata can be in a different order, so the
OrderedDict must be converted to a dict before testing equality.
"""
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# Also test kwarg "overwrite"
assert os.path.exists(fname) # file exists
with pytest.raises(IOError):
cosmo.write(fname, format=format, overwrite=False)
assert os.path.exists(fname)  # overwrite the existing file
cosmo.write(fname, format=format, overwrite=True)
# Read back
got = Cosmology.read(fname, format=format)
assert got == cosmo
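        # XOR check: the metadata should round-trip exactly if and only if the
        # format supports writing metadata (``metaio``).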
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
@pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
def test_readwrite_from_subclass_complete_info(
self, cosmo_cls, cosmo, tmp_path, format, metaio, has_deps
):
"""
Test writing from an instance and reading from that class, when there's
full information saved.
"""
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# read with the same class that wrote.
got = cosmo_cls.read(fname, format=format)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# this should be equivalent to
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# and also
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
class TestCosmologyReadWrite(ReadWriteTestMixin):
"""Test the classes CosmologyRead/Write."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_write_methods_have_explicit_kwarg_overwrite(self, format, _, has_deps):
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
assert "overwrite" in sig.parameters
# also in docstring
assert "overwrite : bool" in writer.__doc__
@pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
def test_readwrite_reader_class_mismatch(
self, cosmo, tmp_path, format, _, has_deps
):
"""Test when the reader class doesn't match the file."""
if not has_deps:
pytest.skip("missing a dependency")
if (format, Cosmology) not in readwrite_registry._readers:
pytest.xfail(f"no read method is registered for format {format!r}")
fname = tmp_path / f"{cosmo.name}.{format}"
cosmo.write(fname, format=format)
# class mismatch
# when reading directly
with pytest.raises(TypeError, match="missing 1 required"):
w0wzCDM.read(fname, format=format)
with pytest.raises(TypeError, match="missing 1 required"):
Cosmology.read(fname, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM")
###############################################################################
# To/From_Format Tests
class ToFromFormatTestMixin(
test_cosmology.ToFromCosmologyTestMixin,
test_mapping.ToFromMappingTestMixin,
test_model.ToFromModelTestMixin,
test_row.ToFromRowTestMixin,
test_table.ToFromTableTestMixin,
test_yaml.ToFromYAMLTestMixin,
):
"""
Tests for a Cosmology[To/From]Format on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_tofromformat_complete_info(
self, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""Read tests happen later."""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# test from_format
got = Cosmology.from_format(obj, format=format)
# Test autodetect, if enabled
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj)
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_fromformat_subclass_complete_info(
self, cosmo_cls, cosmo, format, totype, xfail_if_not_registered_with_yaml
):
"""
Test transforming an instance and parsing from that class, when there's
full information available.
Partial information tests are handled in the Mixin super classes.
"""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# read with the same class that wrote.
got = cosmo_cls.from_format(obj, format=format)
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj) # and autodetect
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.from_format(
obj, format=format, cosmology=cosmo_cls.__qualname__
)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyToFromFormat(ToFromFormatTestMixin):
"""Test Cosmology[To/From]Format classes."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format_type", tofrom_formats)
def test_fromformat_class_mismatch(self, cosmo, format_type):
format, totype = format_type
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# class mismatch
with pytest.raises(TypeError):
w0wzCDM.from_format(obj, format=format)
with pytest.raises(TypeError):
Cosmology.from_format(obj, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")
|
e9b642f5f7d847545c95fc4382f13d9b6a2f51f3b68635bb7f7e47593f86dad4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
import abc
import inspect
import pickle
import numpy as np
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0,
1,
1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift,
3 * u.one, # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift, # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
# Wrong units (the TypeError is for the cython, which can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class CosmologyTest(
ParameterTestMixin,
MetaTestMixin,
ReadWriteTestMixin,
ToFromFormatTestMixin,
metaclass=abc.ABCMeta,
):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
def teardown_class(self):
pass
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
# -----------------------------------------------------------
# Normal subclass creation
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
# -----------------------------------------------------------
# Skip
class UnRegisteredSubclassTest(cosmo_cls):
@classmethod
def _register_cls(cls):
"""Override to not register."""
pass
assert UnRegisteredSubclassTest.__parameters__ == cosmo_cls.__parameters__
assert UnRegisteredSubclassTest.__qualname__ not in _COSMOLOGY_CLASSES
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(
cosmo._init_signature.parameters.keys()
)
assert all(
np.all(sig.parameters[k].default == p.default)
for k, p in cosmo._init_signature.parameters.items()
)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
# Cosmology only does name and meta, but this subclass adds H0 & Tcmb0.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
match = (
"can't set"
if PYTHON_LT_3_11
else f"property 'name' of {cosmo.__class__.__name__!r} object has no setter"
)
with pytest.raises(AttributeError, match=match):
cosmo.name = None
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={"test_clone_name": True})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s).
Nothing here b/c no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
# different name => not equal, but still equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1 :] # remove
# name in string rep
if cosmo.name is not None:
assert f'name="{cosmo.name}"' in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3 :] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = f"{k}={v}"
assert sv in r
assert r.index(k) == 0
r = r[len(sv) + 2 :] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class TestCosmology(CosmologyTest):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(
H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV
)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
"""Teardown for testing."""
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
# ===============================================================
# Method & Attribute Tests
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
The test suite structure mirrors the implementation of the tested code.
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract
base class (ABC) that cannot be used by itself, so too is this corresponding
test class an ABC mixin.
E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_nonflat_class_(self, cosmo_cls, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`."""
# Test it's a method on the class
assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__)
# It also works from the instance. # TODO! as a "metaclassmethod"
assert issubclass(cosmo_cls, cosmo.__nonflatclass__)
# Maybe not the most robust test, but so far all Flat classes have the
# name of their parent class.
assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_nonflat(self, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`."""
assert cosmo.nonflat.is_equivalent(cosmo)
assert cosmo.is_equivalent(cosmo.nonflat)
# ------------------------------------------------
# clone
def test_clone_to_nonflat_equivalent(self, cosmo):
"""Test method ``.clone()``to_nonflat argument."""
# just converting the class
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
@abc.abstractmethod
def test_clone_to_nonflat_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s). No parameters
are changed here because FlatCosmologyMixin has no Parameters.
See class docstring for why this test method exists.
"""
# send to non-flat
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
# ------------------------------------------------
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologyTest.test_is_equivalent(self, cosmo)
# See FlatFLRWMixinTest for tests. It's a bit hard here since this class
# is for an ABC.
# ===============================================================
# Usage Tests
def test_subclassing(self, cosmo_cls):
"""Test when subclassing a flat cosmology."""
class SubClass1(cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__
# A more complex example is when Mixin classes are used.
class Mixin:
pass
class SubClass2(Mixin, cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__
# The order of the Mixin should not matter
class SubClass3(cosmo_cls, Mixin):
pass
# The classes have the same non-flat parent class
assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__
def test__nonflatclass__multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
class SubCosmology2(Cosmology):
def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass
|
a27df6b6524b6e552983c0debefcd46e03cd374f0bcc73c9e5e751b427639e84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the cosmology test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import core
__all__ = ["get_redshift_methods", "clean_registry"]
###############################################################################
# FUNCTIONS
def get_redshift_methods(cosmology, include_private=True, include_z2=True):
"""Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
include_private : bool
Whether to include private methods, i.e. those starting with an underscore.
include_z2 : bool
Whether to include methods that are functions of 2 (or more) redshifts,
not the more common 1 redshift argument.
Returns
-------
set[str]
The names of the redshift methods on `cosmology`, satisfying
`include_private` and `include_z2`.
"""
# Get all the method names, optionally sieving out private methods
methods = set()
for n in dir(cosmology):
try: # get method, some will error on ABCs
m = getattr(cosmology, n)
except NotImplementedError:
continue
# Add anything callable, optionally excluding private methods.
if callable(m) and (not n.startswith("_") or include_private):
methods.add(n)
# Sieve out incompatible methods.
# The index to check for redshift depends on whether cosmology is a class
# or instance and does/doesn't include 'self'.
iz1 = int(isinstance(cosmology, type))
for n in tuple(methods):
try:
sig = inspect.signature(getattr(cosmology, n))
except ValueError: # Remove non-introspectable methods.
methods.discard(n)
continue
else:
params = list(sig.parameters.keys())
# Remove non-redshift methods:
if len(params) <= iz1: # Check there are enough arguments.
methods.discard(n)
elif len(params) >= iz1 + 1 and not params[iz1].startswith(
"z"
): # First non-self arg is z.
methods.discard(n)
# If methods with 2 z args are not allowed, the following arg is checked.
elif (
not include_z2
and (len(params) >= iz1 + 2)
and params[iz1 + 1].startswith("z")
):
methods.discard(n)
return methods
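# ----------------------------------------------------------------------
# Illustrative usage sketch (hypothetical; ``Planck18`` is used only as an
# example and is not required by this module): collecting the public,
# single-redshift methods of a realization.
if __name__ == "__main__":
    from astropy.cosmology import Planck18
    zmethods = get_redshift_methods(Planck18, include_private=False, include_z2=False)
    print(sorted(zmethods))  # e.g. 'H', 'Om', 'age', 'comoving_distance', ...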
###############################################################################
# FIXTURES
@pytest.fixture
def clean_registry():
"""`pytest.fixture` for clearing and restoring ``_COSMOLOGY_CLASSES``."""
# TODO! with monkeypatch instead for thread safety.
ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = {} # set as empty dict
yield core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES
|
c1e3c923a78bd837668556685e32d63e57765008a35357dc1267e582783f3652 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Astropy FLRW classes."""
from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm
from .base import *
from .lambdacdm import *
from .w0cdm import *
from .w0wacdm import *
from .w0wzcdm import *
from .wpwazpcdm import *
__all__ = (
base.__all__
+ lambdacdm.__all__
+ w0cdm.__all__
+ w0wacdm.__all__
+ wpwazpcdm.__all__
+ w0wzcdm.__all__
)
|
d75e4e69bdc6d4d61ddaf630ff88f1eabb44867f58377e64d2259e2d2e49826d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
from typing import TYPE_CHECKING, Any, TypeVar
import numpy as np
from numpy import inf, sin
import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.parameter._converter import (
_validate_non_negative,
_validate_with_unit,
)
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["FLRW", "FlatFLRWMixin"]
__doctest_requires__ = {"*": ["scipy"]}
if TYPE_CHECKING:
from collections.abc import Mapping
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
##############################################################################
# Parameters
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
_sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
_critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
_radian_in_arcsec = (1 * u.rad).to(u.arcsec)
_radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
_a_B_c2 = (4 * const.sigma_sb / const.c**3).cgs.value
# Boltzmann constant in eV / K
_kB_evK = const.k_B.to(u.eV / u.K)
# typing
_FLRWT = TypeVar("_FLRWT", bound="FLRW")
_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
##############################################################################
class _ScaleFactorMixin:
@property
def scale_factor0(self):
r"""Scale factor at redshift 0.
The scale factor is defined as :math:`a = \frac{a_0}{1 + z}`. The common
convention is to set :math:`a_0 = 1`. However, in some cases, e.g. in some old
CMB papers, :math:`a_0` is used to normalize `a` to be a convenient number at
the redshift of interest for that paper. Explicitly using :math:`a_0` in both
calculation and code avoids ambiguity.
"""
return u.Quantity(self.scale_factor(0), unit=u.one)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
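# Usage sketch for the mixin above (illustrative only; ``Planck18`` is the
# built-in realization in ``astropy.cosmology`` and inherits this mixin via
# ``FLRW``):
#
#     >>> from astropy.cosmology import Planck18   # doctest: +SKIP
#     >>> Planck18.scale_factor(1)                 # doctest: +SKIP
#     0.5
#     >>> Planck18.scale_factor0                   # doctest: +SKIP
#     <Quantity 1.>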
class FLRW(Cosmology, _ScaleFactorMixin):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0 = Parameter(
doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)",
fvalidate="scalar",
)
Om0 = Parameter(
doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative",
)
Ode0 = Parameter(
doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float",
)
Tcmb0 = Parameter(
doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin",
fvalidate="scalar",
)
Neff = Parameter(
doc="Number of effective neutrino species.", fvalidate="non-negative"
)
m_nu = Parameter(
doc="Mass of neutrino species.", unit="eV", equivalencies=u.mass_energy()
)
Ob0 = Parameter(
doc="Omega baryon; baryonic matter density/critical density at z=0."
)
def __init__(
self,
H0,
Om0,
Ode0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None,
):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * _H0units_to_invs
# Hubble time
self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = _critdens_const * H0_s**2
self._critical_density0 = cd0value << u.g / u.cm**3
# Compute photon density from Tcmb
self._Ogamma0 = _a_B_c2 * self._Tcmb0.value**4 / self._critical_density0.value
# Compute Neutrino temperature:
        # The constant in front is (4/11)^(1/3) -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = (
self._m_nu[massive].value if self._massivenu else None
)
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
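    # Construction sketch (illustrative; ``FlatLambdaCDM`` is one of the
    # concrete subclasses referred to in the class docstring above):
    #
    #     >>> from astropy.cosmology import FlatLambdaCDM   # doctest: +SKIP
    #     >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725,
    #     ...                       m_nu=[0.0, 0.0, 0.06] * u.eV, Ob0=0.049)
    #     >>> cosmo.h, cosmo.Odm0, cosmo.hubble_distance     # doctest: +SKIP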
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError(
"baryonic density can not be larger than total matter density."
)
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError(
"unexpected number of neutrino masses — "
f"expected {nneutrinos}, got {len(value)}."
)
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""
Temperature of the neutrino background as `~astropy.units.Quantity` at z=0.
"""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError(
"Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density"
)
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
            The density of dark energy relative to the critical density at
            each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Only returns `float` if z is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^(4/3) -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return (
prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1.0 + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
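    # Worked restatement of the fit used above (illustrative, single massive
    # species): with y = m_nu / (k_B * T_nu(z)), the Komatsu et al. (2011)
    # fitting function is
    #
    #     f(y) ~= (1 + (0.3173 * y) ** 1.83) ** (1 / 1.83)
    #
    # and this method returns 0.22710731766 * (Neff / N_nu) * (N_massless +
    # sum_i f(y_i)), which tends to 0.22710731766 * Neff when every species is
    # massless or still relativistic (y -> 0).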
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
        It is generally helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array(
[quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z]
)
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
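    # Sanity-check sketch (illustrative): for a constant equation of state
    # w(z) = w, the integral above has the closed form
    #
    #     de_density_scale(z) = (1 + z) ** (3 * (1 + w)),
    #
    # which reduces to 1 for a cosmological constant (w = -1). Subclasses with
    # analytic forms override this method with such closed-form expressions.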
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
)
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
) ** (-0.5)
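    # Expanded form of the Horner-style grouping used in efunc/inv_efunc
    # (illustrative restatement): with Or the radiation density (photons plus
    # neutrinos),
    #
    #     E(z)**2 = Or*(1+z)**4 + Om0*(1+z)**3 + Ok0*(1+z)**2
    #               + Ode0 * de_density_scale(z)
    #
    # so that H(z) = H0 * E(z) and inv_efunc(z) = 1 / E(z).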
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, inf)[0]
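    # Illustrative restatement: the quantity integrated here is
    #
    #     t(z) / t_H = integral from z to infinity of dz' / ((1 + z') * E(z')),
    #
    # i.e. the same integrand as the lookback time, taken from z to infinity
    # instead of from 0 to z.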
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in Mpc.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance in Mpc between each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) # fmt: skip
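    # Usage sketch (illustrative; ``Planck18`` is the built-in realization in
    # ``astropy.cosmology``):
    #
    #     >>> from astropy.cosmology import Planck18   # doctest: +SKIP
    #     >>> Planck18.comoving_distance(1.0)          # doctest: +SKIP
    #     <Quantity ... Mpc>
    #
    # The underlying integral is D_C = D_H * integral from 0 to z of dz'/E(z').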
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc between input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
            Returns a scalar if the input is scalar, an array otherwise.
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(
f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).",
AstropyUserWarning,
)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
        distmod : `~astropy.units.Quantity`
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5.0 * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
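    # Equivalent form (illustrative): with the luminosity distance in parsec,
    #
    #     mu = 5 * log10(d_L [pc] / 10 pc),
    #
    # and the ``+ 25.0`` offset above is exactly 5 * log10(1e6 / 10), i.e. the
    # same expression for distances given in Mpc.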
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
`comoving_distance` but it is less intuitive if :math:`\Omega_k` is not.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh**3 / (2.0 * Ok0) * u.Mpc**3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm**2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
"""
return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
Must precede the base class in the multiple-inheritance so that this
    mixin's ``__init__`` precedes the base class's.
Note that all instances of ``FlatFLRWMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatFLRWMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError(
"subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`"
)
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
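        # Illustrative consequence of the derivation above: Ode0 is chosen so
        # that
        #
        #     Om0 + Ogamma0 + Onu0 + Ode0 + Ok0 == 1
        #
        # holds exactly, which is why ``Otot0`` below can simply return 1.0.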
@lazyproperty
def nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self.__nonflatclass__._init_signature.bind_partial(
**self._init_arguments, Ode0=self.Ode0
)
# Make new instance, respecting args vs kwargs
inst = self.__nonflatclass__(*ba.args, **ba.kwargs)
# Because of machine precision, make sure parameters exactly match
for n in inst.__all_parameters__ + ("Ok0",):
setattr(inst, "_" + n, getattr(self, n))
return inst
def clone(
self, *, meta: Mapping | None = None, to_nonflat: bool = None, **kwargs: Any
):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool or None, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
With 'to_nonflat' `True`, ``Ode0`` can be modified.
>>> Planck13.clone(to_nonflat=True, Ode0=1)
LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s),
Om0=0.30712, Ode0=1.0, ...
"""
return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
Returns float if input scalar. Value of 1.
"""
return (
1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
)
|
bf3b701c360dc3e67ba8ad74dace2a32a3a203bf68bb41bf74a582bf7ac6e82e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections import defaultdict
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable, Row
from .mapping import from_mapping
def from_row(row, *, move_to_meta=False, cosmology=None, rename=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.
Parameters
----------
row : `~astropy.table.Row`
The object containing the Cosmology information.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, type, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
        filling in any non-mandatory arguments missing in 'row'.
rename : dict or None (optional, keyword-only)
A dictionary mapping columns in the row to fields of the `~astropy.cosmology.Cosmology`.
Returns
-------
`~astropy.cosmology.Cosmology`
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Row with
``from_row``, we will first make a `~astropy.table.Row` using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this row can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cr, format="astropy.row")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
If a `~astropy.table.Row` object has columns that do not match the fields of the
`~astropy.cosmology.Cosmology` class, they can be mapped using the ``rename``
keyword argument.
>>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"})
>>> renamed
<Row index=0>
cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(renamed, format="astropy.row",
... rename={"Hubble": "H0"})
>>> cosmo == Planck18
True
"""
inv_rename = {v: k for k, v in rename.items()} if rename is not None else {}
kname = inv_rename.get("name", "name")
kmeta = inv_rename.get("meta", "meta")
kcosmo = inv_rename.get("cosmology", "cosmology")
# special values
name = row.get(kname)
meta = defaultdict(dict, copy.deepcopy(row.meta))
# Now need to add the Columnar metadata. This is only available on the
# parent table. If Row is ever separated from Table, this should be moved
# to ``to_table``.
for col in row._table.itercols():
if col.info.meta: # Only add metadata if not empty
meta[col.name].update(col.info.meta)
# turn row into mapping, filling cosmo if not in a column
mapping = dict(row)
mapping[kname] = name
mapping.setdefault(kcosmo, meta.pop(kcosmo, None))
mapping[kmeta] = dict(meta)
# build cosmology from map
return from_mapping(
mapping, move_to_meta=move_to_meta, cosmology=cosmology, rename=rename
)
def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable, rename=None):
"""Serialize the cosmology into a `~astropy.table.Row`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`
The cosmology instance to convert to a mapping.
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
table_cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to use.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`) or
as the first column (if `False`, default).
Returns
-------
`~astropy.table.Row`
With columns for the cosmology parameters, and metadata in the Table's
``meta`` attribute. The cosmology class name will either be a column
or in ``meta``, depending on 'cosmology_in_meta'.
Examples
--------
A `~astropy.cosmology.Cosmology` as a `~astropy.table.Row` will have
the cosmology's name and parameters as columns.
>>> from astropy.cosmology import Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
The columns can be renamed using the ``rename`` keyword argument.
>>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"})
>>> renamed
<Row index=0>
cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
"""
from .table import to_table
table = to_table(
cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta, rename=rename
)
return table[0] # extract row from table
def row_identify(origin, format, *args, **kwargs):
"""Identify if object uses the `~astropy.table.Row` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
|
9d626673f67b9a7ed9e7027913acca8c091fb5a02b73efb3572b6b243b087b71 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import Column, QTable, Table
from .mapping import to_mapping
from .row import from_row
from .utils import convert_parameter_to_column
def from_table(table, index=None, *, move_to_meta=False, cosmology=None, rename=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|.
Parameters
----------
table : `~astropy.table.Table`
The object to parse into a |Cosmology|.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str or type or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
rename : dict or None (optional, keyword-only)
A dictionary mapping columns in 'table' to fields of the `~astropy.cosmology.Cosmology` class.
Returns
-------
`~astropy.cosmology.Cosmology`
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Table with
``from_table``, we will first make a |QTable| using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this table can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(ct, format="astropy.table")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del ct["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(ct)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
For tables with multiple rows of cosmological parameters, the ``index``
argument is needed to select the correct row. The index can be an integer
for the row number or, if the table is indexed by a column, the value of
that column. If the table is not indexed and ``index`` is a string, the
"name" column is used as the indexing column.
Here is an example where ``index`` is needed and can be either an integer
(for the row number) or the name of one of the cosmologies, e.g. 'Planck15'.
>>> from astropy.cosmology import Planck13, Planck15, Planck18
>>> from astropy.table import vstack
>>> cts = vstack([c.to_format("astropy.table")
... for c in (Planck13, Planck15, Planck18)],
... metadata_conflicts='silent')
>>> cts
<QTable length=3>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- --------
Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252
Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(cts, index=1, format="astropy.table")
>>> cosmo == Planck15
True
Fields in the table can be renamed to match the `~astropy.cosmology.Cosmology`
class' signature using the ``rename`` argument. This is useful when the
table's column names do not match the class' parameter names.
>>> renamed_table = Planck18.to_format("astropy.table", rename={"H0": "Hubble"})
>>> renamed_table
<QTable length=1>
name Hubble Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(renamed_table, format="astropy.table",
... rename={"Hubble": "H0"})
>>> cosmo == Planck18
True
For further examples, see :doc:`astropy:cosmology/io`.
"""
# Get row from table
# string index uses the indexed column on the table to find the row index.
if isinstance(index, str):
if not table.indices: # no indexing column, find by string match
nc = "name" # default name column
if rename is not None: # from inverted `rename`
for key, value in rename.items():
if value == "name":
nc = key
break
indices = np.where(table[nc] == index)[0]
else: # has indexing column
indices = table.loc_indices[index] # need to convert to row index (int)
if isinstance(indices, (int, np.integer)): # loc_indices
index = indices
elif len(indices) == 1: # only happens w/ np.where
index = indices[0]
elif len(indices) == 0: # matches from loc_indices
raise KeyError(f"No matches found for key {indices}")
else: # like the Highlander, there can be only 1 Cosmology
raise ValueError(f"more than one cosmology found for key {indices}")
# no index is needed for a 1-row table. For a multi-row table...
if index is None:
if len(table) != 1: # multi-row table and no index
raise ValueError(
"need to select a specific row (e.g. index=1) when "
"constructing a Cosmology from a multi-row table."
)
else: # single-row table
index = 0
row = table[index] # index is now the row index (int)
# parse row to cosmo
return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology, rename=rename)
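# Note on the string-``index`` path above (illustrative): if the table has an
# index (as tables written by ``to_table`` do), ``table.loc_indices[index]``
# resolves the row directly; for unindexed tables the inverse of ``rename`` is
# used to locate the (possibly renamed) "name" column and the row is found by
# string match against it.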
def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True, rename=None):
"""Serialize the cosmology into a `~astropy.table.QTable`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`
The cosmology instance to convert to a table.
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to return.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool (optional, keyword-only)
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
Returns
-------
`~astropy.table.QTable`
With columns for the cosmology parameters, and metadata and
cosmology class name in the Table's ``meta`` attribute
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
Examples
--------
A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
>>> ct.meta
OrderedDict([..., ('cosmology', 'FlatLambdaCDM')])
To move the cosmology class from the metadata to a Table row, set the
``cosmology_in_meta`` argument to `False`:
>>> Planck18.to_format("astropy.table", cosmology_in_meta=False)
<QTable length=1>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Astropy recommends `~astropy.table.QTable` for tables with
`~astropy.units.Quantity` columns. However the returned type may be
overridden using the ``cls`` argument:
>>> from astropy.table import Table
>>> Planck18.to_format("astropy.table", cls=Table)
<Table length=1>
...
Fields of the cosmology may be renamed using the ``rename`` argument.
>>> Planck18.to_format("astropy.table", rename={"H0": "Hubble"})
<QTable length=1>
name Hubble Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
"""
if not issubclass(cls, Table):
raise TypeError(f"'cls' must be a (sub)class of Table, not {type(cls)}")
# Start by getting a map representation.
data = to_mapping(cosmology)
data["cosmology"] = data["cosmology"].__qualname__ # change to str
# Metadata
meta = data.pop("meta") # remove the meta
if cosmology_in_meta:
meta["cosmology"] = data.pop("cosmology")
# Need to turn everything into something Table can process:
# - Column for Parameter
# - list for anything else
cosmo_cls = cosmology.__class__
for k, v in data.items():
if k in cosmology.__parameters__:
col = convert_parameter_to_column(
getattr(cosmo_cls, k), v, cosmology.meta.get(k)
)
else:
col = Column([v])
data[k] = col
tbl = cls(data, meta=meta)
# Renames
renames = rename or {}
for name in tbl.colnames:
tbl.rename_column(name, renames.get(name, name))
# Add index
tbl.add_index(renames.get("name", "name"), unique=True)
return tbl
def table_identify(origin, format, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Table) and (format in (None, "astropy.table"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.table", Cosmology, from_table)
convert_registry.register_writer("astropy.table", Cosmology, to_table)
convert_registry.register_identifier("astropy.table", Cosmology, table_identify)
|
7bd9d46ae53f9e59c119d3e4cd07ff01ce28568765c617c2f881d0489a6ff6a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**.
"""
# Import to register with the I/O machinery
from . import cosmology, ecsv, html, latex, mapping, model, row, table, yaml
|
1752f6225bf6bea5b5bf9e1af65ae3f767732be74645dadcc2202afd623d8414 | import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable
from .table import to_table
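# Format look-up for conversion, {original_name: latex_name}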
_FORMAT_TABLE = {
"H0": "$H_0$",
"Om0": r"$\Omega_{m,0}$",
"Ode0": r"$\Omega_{\Lambda,0}$",
"Tcmb0": "$T_{0}$",
"Neff": "$N_{eff}$",
"m_nu": "$m_{nu}$",
"Ob0": r"$\Omega_{b,0}$",
"w0": "$w_{0}$",
"wa": "$w_{a}$",
"wz": "$w_{z}$",
"wp": "$w_{p}$",
"zp": "$z_{p}$",
}
def write_latex(
cosmology, file, *, overwrite=False, cls=QTable, latex_names=True, **kwargs
):
r"""Serialize the |Cosmology| into a LaTeX table.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
Location to save the serialized cosmology.
overwrite : bool
Whether to overwrite the file, if it exists.
cls : type, optional keyword-only
Astropy :class:`~astropy.table.Table` (sub)class to use when writing.
Default is :class:`~astropy.table.QTable`.
latex_names : bool, optional keyword-only
Whether to use LaTeX names for the parameters. Default is `True`.
**kwargs
Passed to ``cls.write``
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
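Examples
--------
A minimal sketch of writing ``Planck18`` to a LaTeX file via the unified
write interface; the filename is illustrative:
>>> from astropy.cosmology import Planck18
>>> Planck18.write("example_cosmology.tex", format="ascii.latex")  # doctest: +SKIP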
"""
# Check that the format is 'latex', 'ascii.latex' (or not specified)
format = kwargs.pop("format", "latex")
if format not in ("latex", "ascii.latex"):
raise ValueError(f"format must be 'latex' or 'ascii.latex', not {format}")
# Set cosmology_in_meta as false for now since there is no metadata being kept
table = to_table(cosmology, cls=cls, cosmology_in_meta=False)
cosmo_cls = type(cosmology)
for name, col in table.columns.copy().items():
param = getattr(cosmo_cls, name, None)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Convert the column to the parameter's unit
table[name] <<= param.unit
# Convert parameter names to LaTeX format
if latex_names:
new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.__parameters__]
table.rename_columns(cosmology.__parameters__, new_names)
table.write(file, overwrite=overwrite, format="latex", **kwargs)
# ===================================================================
# Register
readwrite_registry.register_writer("latex", Cosmology, write_latex)
readwrite_registry.register_writer("ascii.latex", Cosmology, write_latex)
|
c8941435d0692f66ea9bc9955bb31d0757f4c8b514439d4fc498ab7a806b6a73 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.io.misc.yaml import AstropyDumper, AstropyLoader, dump, load
from .mapping import from_mapping
from .utils import FULLQUALNAME_SUBSTITUTIONS as QNS
__all__ = [] # nothing is publicly scoped
##############################################################################
# Serializer Functions
# These convert Cosmology <-> YAML through a modified dictionary representation
# of the Cosmology object. The Unified-I/O functions below are thin wrappers
# that call these (de)serializers.
def yaml_representer(tag):
""":mod:`yaml` representation of |Cosmology| object.
Parameters
----------
tag : str
The class tag, e.g. '!astropy.cosmology.LambdaCDM'
Returns
-------
representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str]
Function to construct :mod:`yaml` representation of |Cosmology| object.
"""
def representer(dumper, obj):
"""Cosmology yaml representer function for {}.
Parameters
----------
dumper : `~astropy.io.misc.yaml.AstropyDumper`
obj : `~astropy.cosmology.Cosmology`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object.
"""
# convert to mapping
map = obj.to_format("mapping")
# remove the cosmology class info. It's already recorded in `tag`
map.pop("cosmology")
# make the metadata serializable in an order-preserving way.
map["meta"] = tuple(map["meta"].items())
return dumper.represent_mapping(tag, map)
representer.__doc__ = representer.__doc__.format(tag)
return representer
def yaml_constructor(cls):
"""|Cosmology| object from :mod:`yaml` representation.
Parameters
----------
cls : type
The class type, e.g. `~astropy.cosmology.LambdaCDM`.
Returns
-------
constructor : callable
Function to construct |Cosmology| object from :mod:`yaml` representation.
"""
def constructor(loader, node):
"""Cosmology yaml constructor function.
Parameters
----------
loader : `~astropy.io.misc.yaml.AstropyLoader`
node : `yaml.nodes.MappingNode`
yaml representation of |Cosmology| object.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
"""
# create mapping from YAML node
map = loader.construct_mapping(node)
# restore metadata to dict
map["meta"] = dict(map["meta"])
# get cosmology class qualified name from node
cosmology = str(node.tag).split(".")[-1]
# create Cosmology from mapping
return from_mapping(map, move_to_meta=False, cosmology=cosmology)
return constructor
def register_cosmology_yaml(cosmo_cls):
"""Register :mod:`yaml` for Cosmology class.
Parameters
----------
cosmo_cls : `~astropy.cosmology.Cosmology` class
"""
fqn = f"{cosmo_cls.__module__}.{cosmo_cls.__qualname__}"
tag = "!" + QNS.get(
fqn, fqn
) # Possibly sub fully qualified name for a preferred path
AstropyDumper.add_representer(cosmo_cls, yaml_representer(tag))
AstropyLoader.add_constructor(tag, yaml_constructor(cosmo_cls))
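# A hedged usage sketch: registering a Cosmology subclass enables YAML
# round-tripping for that class (the class used here is illustrative).
#
#     from astropy.cosmology import FlatLambdaCDM
#     register_cosmology_yaml(FlatLambdaCDM)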
##############################################################################
# Unified-I/O Functions
def from_yaml(yml, *, cosmology=None):
"""Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.
Parameters
----------
yml : str
:mod:`yaml` representation of |Cosmology| object
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The expected cosmology class (or string name thereof). This argument
is only checked for correctness if not `None`.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Raises
------
TypeError
If the |Cosmology| object loaded from ``yml`` is not an instance of
the ``cosmology`` (and ``cosmology`` is not `None`).
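Examples
--------
A round-trip sketch, first producing the YAML string with
:meth:`~astropy.cosmology.Cosmology.to_format`:
>>> from astropy.cosmology import Cosmology, Planck18
>>> yml = Planck18.to_format("yaml")
>>> Cosmology.from_format(yml, format="yaml") == Planck18
True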
"""
with u.add_enabled_units(cu):
cosmo = load(yml)
# Check argument `cosmology`, if not None
# This kwarg is required for compatibility with |Cosmology.from_format|
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
if cosmology is not None and not isinstance(cosmo, cosmology):
raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")
return cosmo
def to_yaml(cosmology, *args):
"""Return the cosmology class, parameters, and metadata as a :mod:`yaml` object.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object
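Examples
--------
A small sketch of serializing ``Planck18`` to a YAML string:
>>> from astropy.cosmology import Planck18
>>> yml = Planck18.to_format("yaml")
>>> yml.startswith("!astropy.cosmology")
True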
"""
return dump(cosmology)
# ``read`` cannot handle non-path strings.
# TODO! this says there should be different types of I/O registries.
# not just hacking object conversion on top of file I/O.
# def yaml_identify(origin, format, *args, **kwargs):
# """Identify if object uses the yaml format.
#
# Returns
# -------
# bool
# """
# itis = False
# if origin == "read":
# itis = isinstance(args[1], str) and args[1][0].startswith("!")
# itis &= format in (None, "yaml")
#
# return itis
# ===================================================================
# Register
convert_registry.register_reader("yaml", Cosmology, from_yaml)
convert_registry.register_writer("yaml", Cosmology, to_yaml)
# convert_registry.register_identifier("yaml", Cosmology, yaml_identify)
|
e8de2d4ae387ad27ab5451ccdc8dba5ba81e5a70a7d409d5fb7811c2bbd7a034 | import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable
from .table import from_table, to_table
# Format look-up for conversion, {original_name: new_name}
# TODO! move this information into the Parameters themselves
_FORMAT_TABLE = {
"H0": "$$H_{0}$$",
"Om0": "$$\\Omega_{m,0}$$",
"Ode0": "$$\\Omega_{\\Lambda,0}$$",
"Tcmb0": "$$T_{0}$$",
"Neff": "$$N_{eff}$$",
"m_nu": "$$m_{nu}$$",
"Ob0": "$$\\Omega_{b,0}$$",
"w0": "$$w_{0}$$",
"wa": "$$w_{a}$$",
"wz": "$$w_{z}$$",
"wp": "$$w_{p}$$",
"zp": "$$z_{p}$$",
}
def read_html_table(
filename,
index=None,
*,
move_to_meta=False,
cosmology=None,
latex_names=True,
**kwargs,
):
"""Read a |Cosmology| from an HTML file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int or str or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index`` is a
string, the "name" column is used as the indexing column.
move_to_meta : bool, optional keyword-only
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict (e.g. for
``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be
``{'key': 10}``).
cosmology : str or |Cosmology| class or None, optional keyword-only
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter
values, filling in any non-mandatory arguments missing in 'table'.
latex_names : bool, optional keyword-only
Whether the |Table| (might) have latex column names for the parameters
that need to be mapped to the correct parameter name -- e.g. $$H_{0}$$
to 'H0'. This is `True` by default, but can be turned off (set to
`False`) if there is a known name conflict (e.g. both an 'H0' and
'$$H_{0}$$' column) as this will raise an error. In this case, the
correct name ('H0') is preferred.
**kwargs : Any
Passed to :attr:`astropy.table.QTable.read`. ``format`` is set to
'ascii.html', regardless of input.
Returns
-------
|Cosmology| subclass instance
Raises
------
ValueError
If the keyword argument 'format' is given and is not "ascii.html".
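Examples
--------
A minimal sketch of reading a cosmology previously written with
``write_html_table``; the filename is illustrative:
>>> from astropy.cosmology import Cosmology
>>> cosmo = Cosmology.read("example_cosmology.html", format="ascii.html")  # doctest: +SKIP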
"""
# Check that the format is 'ascii.html' (or not specified)
format = kwargs.pop("format", "ascii.html")
if format != "ascii.html":
raise ValueError(f"format must be 'ascii.html', not {format}")
# Reading is handled by `QTable`.
with u.add_enabled_units(cu): # (cosmology units not turned on by default)
table = QTable.read(filename, format="ascii.html", **kwargs)
# Need to map the table's column names to Cosmology inputs (parameter
# names).
# TODO! move the `latex_names` into `from_table`
if latex_names:
table_columns = set(table.colnames)
for name, latex in _FORMAT_TABLE.items():
if latex in table_columns:
table.rename_column(latex, name)
# Build the cosmology from table, using the private backend.
return from_table(
table, index=index, move_to_meta=move_to_meta, cosmology=cosmology, rename=None
)
def write_html_table(
cosmology, file, *, overwrite=False, cls=QTable, latex_names=False, **kwargs
):
r"""Serialize the |Cosmology| into a HTML table.
Parameters
----------
cosmology : |Cosmology| subclass instance
file : path-like or file-like
Location to save the serialized cosmology.
overwrite : bool, optional keyword-only
Whether to overwrite the file, if it exists.
cls : |Table| class, optional keyword-only
Astropy |Table| (sub)class to use when writing. Default is |QTable|
class.
latex_names : bool, optional keyword-only
Whether to format the parameters (column) names to latex -- e.g. 'H0' to
$$H_{0}$$.
**kwargs : Any
Passed to ``cls.write``.
Raises
------
TypeError
If the optional keyword-argument 'cls' is not a subclass of |Table|.
ValueError
If the keyword argument 'format' is given and is not "ascii.html".
Notes
-----
An HTML file containing a Cosmology HTML table should have scripts enabling
MathJax.
::
<script
src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script type="text/javascript" id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
</script>
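Examples
--------
A minimal sketch of writing ``Planck18`` to an HTML file with LaTeX column
names; the filename is illustrative:
>>> from astropy.cosmology import Planck18
>>> Planck18.write("example_cosmology.html", format="ascii.html",
...                latex_names=True)  # doctest: +SKIP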
"""
# Check that the format is 'ascii.html' (or not specified)
format = kwargs.pop("format", "ascii.html")
if format != "ascii.html":
raise ValueError(f"format must be 'ascii.html', not {format}")
# Set cosmology_in_meta as false for now since there is no metadata being kept
table = to_table(cosmology, cls=cls, cosmology_in_meta=False)
cosmo_cls = type(cosmology)
for name, col in table.columns.items():
param = getattr(cosmo_cls, name, None)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(name, (col << param.unit).value, copy=False)
if latex_names:
new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.__parameters__]
table.rename_columns(cosmology.__parameters__, new_names)
# Write HTML, using table I/O
table.write(file, overwrite=overwrite, format="ascii.html", **kwargs)
def html_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if an object uses the HTML Table format.
Parameters
----------
origin : Any
Not used.
filepath : str or Any
From where to read the Cosmology.
fileobj : Any
Not used.
*args : Any
Not used.
**kwargs : Any
Not used.
Returns
-------
bool
If the filepath is a string ending with '.html'.
"""
return isinstance(filepath, str) and filepath.endswith(".html")
# ===================================================================
# Register
readwrite_registry.register_reader("ascii.html", Cosmology, read_html_table)
readwrite_registry.register_writer("ascii.html", Cosmology, write_html_table)
readwrite_registry.register_identifier("ascii.html", Cosmology, html_identify)
|
eba2d2de952b22a83f516afc9c14da509b501f3b8d74685a937ee309a3bd9af0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import copy
from collections.abc import Mapping
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
__all__ = [] # nothing is publicly scoped
def _rename_map(map, /, renames):
"""Apply rename to map."""
if common_names := set(renames.values()).intersection(map):
raise ValueError(
"'renames' values must be disjoint from 'map' keys, "
f"the common keys are: {common_names}"
)
return {renames.get(k, k): v for k, v in map.items()} # dict separate from input
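# A hedged example of ``_rename_map`` above (illustrative values):
#     _rename_map({"H0": 67.0, "name": "example"}, renames={"H0": "Hubble"})
#     -> {'Hubble': 67.0, 'name': 'example'}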
def _get_cosmology_class(cosmology, params, /):
# get cosmology
# 1st from argument. Allows for override of the cosmology, if on file.
# 2nd from params. This MUST have the cosmology if 'kwargs' did not.
if cosmology is None:
cosmology = params.pop("cosmology")
else:
params.pop("cosmology", None) # pop, but don't use
# if string, parse to class
return _COSMOLOGY_CLASSES[cosmology] if isinstance(cosmology, str) else cosmology
def from_mapping(mapping, /, *, move_to_meta=False, cosmology=None, rename=None):
"""Load `~astropy.cosmology.Cosmology` from mapping object.
Parameters
----------
mapping : Mapping
Arguments into the class -- like "name" or "meta".
If 'cosmology' is None, must have field "cosmology" which can be either
the string name of the cosmology class (e.g. "FlatLambdaCDM") or the
class itself.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'mapping'.
rename : dict or None (optional, keyword-only)
A dictionary mapping keys in ``mapping`` to fields of the
`~astropy.cosmology.Cosmology`.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a dictionary with
``from_mapping``, we will first make a mapping using
:meth:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cm = Planck18.to_format('mapping')
>>> cm
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
Now this dict can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cm, format="mapping")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del cm["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(cm)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
"""
# Rename keys, if given a ``rename`` dict.
# Also, make a copy of the mapping, so we can pop from it.
params = _rename_map(dict(mapping), renames=rename or {})
# Get cosmology class
cosmology = _get_cosmology_class(cosmology, params)
# select arguments from mapping that are in the cosmo's signature.
ba = cosmology._init_signature.bind_partial() # blank set of args
ba.apply_defaults() # fill in the defaults
for k in cosmology._init_signature.parameters.keys():
if k in params: # transfer argument, if in params
ba.arguments[k] = params.pop(k)
# deal with remaining params. If there is a **kwargs use that, else
# allow to transfer to metadata. Raise TypeError if can't.
lastp = tuple(cosmology._init_signature.parameters.values())[-1]
if lastp.kind == 4: # variable keyword-only, i.e. ``**kwargs`` (inspect.Parameter.VAR_KEYWORD)
ba.arguments[lastp.name] = params
elif move_to_meta: # prefers current meta, which was explicitly set
meta = ba.arguments["meta"] or {} # (None -> dict)
ba.arguments["meta"] = {**params, **meta}
elif params:
raise TypeError(f"there are unused parameters {params}.")
# else: pass # no kwargs, no move-to-meta, and all the params are used
return cosmology(*ba.args, **ba.kwargs)
def to_mapping(
cosmology,
*args,
cls=dict,
cosmology_as_str=False,
move_from_meta=False,
rename=None,
):
"""Return the cosmology class, parameters, and metadata as a `dict`.
Parameters
----------
cosmology : :class:`~astropy.cosmology.Cosmology`
The cosmology instance to convert to a mapping.
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
`dict` or `collections.abc.Mapping` subclass.
The mapping type to return. Default is `dict`.
cosmology_as_str : bool (optional, keyword-only)
Whether the cosmology value is the class (if `False`, default) or
the semi-qualified name (if `True`).
move_from_meta : bool (optional, keyword-only)
Whether to add the Cosmology's metadata as an item to the mapping (if
`False`, default) or to merge with the rest of the mapping, preferring
the original values (if `True`)
rename : dict or None (optional, keyword-only)
A `dict` mapping fields of the :class:`~astropy.cosmology.Cosmology` to keys in
the map.
Returns
-------
Mapping
A mapping of type ``cls``, by default a `dict`.
Has key-values for the cosmology parameters and also:
- 'cosmology' : the class
- 'meta' : the contents of the cosmology's metadata attribute.
If ``move_from_meta`` is `True`, this key is missing and the
contained metadata are added to the main `dict`.
Examples
--------
A Cosmology as a mapping will have the cosmology's name and
parameters as items, and the metadata as a nested dictionary.
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping')
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
The dictionary type may be changed with the ``cls`` keyword argument:
>>> from collections import OrderedDict
>>> Planck18.to_format('mapping', cls=OrderedDict)
OrderedDict([('cosmology', <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>),
('name', 'Planck18'), ('H0', <Quantity 67.66 km / (Mpc s)>),
('Om0', 0.30966), ('Tcmb0', <Quantity 2.7255 K>), ('Neff', 3.046),
('m_nu', <Quantity [0. , 0. , 0.06] eV>), ('Ob0', 0.04897),
('meta', ...
Sometimes it is more useful to have the name of the cosmology class, not
the object itself. The keyword argument ``cosmology_as_str`` may be used:
>>> Planck18.to_format('mapping', cosmology_as_str=True)
{'cosmology': 'FlatLambdaCDM', ...
The metadata is normally included as a nested mapping. To move the metadata
into the main mapping, use the keyword argument ``move_from_meta``. This
kwarg inverts ``move_to_meta`` in
``Cosmology.to_format("mapping", move_to_meta=...)`` where extra items
are moved to the metadata (if the cosmology constructor does not have a
variable keyword-only argument -- ``**kwargs``).
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping', move_from_meta=True)
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'Oc0': 0.2607, 'n': 0.9665, 'sigma8': 0.8102, ...
Lastly, the keys in the mapping may be renamed with the ``rename`` keyword.
>>> rename = {'cosmology': 'cosmo_cls', 'name': 'cosmo_name'}
>>> Planck18.to_format('mapping', rename=rename)
{'cosmo_cls': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'cosmo_name': 'Planck18', ...
"""
if not issubclass(cls, (dict, Mapping)):
raise TypeError(f"'cls' must be a (sub)class of dict or Mapping, not {cls}")
m = cls()
# start with the cosmology class & name
m["cosmology"] = (
cosmology.__class__.__qualname__ if cosmology_as_str else cosmology.__class__
)
m["name"] = cosmology.name # here only for dict ordering
meta = copy.deepcopy(cosmology.meta) # metadata (mutable)
if move_from_meta:
# Merge the mutable metadata. Since params are added later they will
# be preferred in cases of overlapping keys. Likewise, need to pop
# cosmology and name from meta.
meta.pop("cosmology", None)
meta.pop("name", None)
m.update(meta)
# Add all the immutable inputs
m.update(
{
k: v
for k, v in cosmology._init_arguments.items()
if k not in ("meta", "name")
}
)
# Lastly, add the metadata, if haven't already (above)
if not move_from_meta:
m["meta"] = meta # TODO? should meta be type(cls)
# Rename keys
return m if rename is None else _rename_map(m, rename)
def mapping_identify(origin, format, *args, **kwargs):
"""Identify if object uses the mapping format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Mapping) and (format in (None, "mapping"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("mapping", Cosmology, from_mapping)
convert_registry.register_writer("mapping", Cosmology, to_mapping)
convert_registry.register_identifier("mapping", Cosmology, mapping_identify)
|
d0d8ce9e080fd72e84654ffaebb8c6901e3492d3ba82d9724368af19023d9c8a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable
from .table import from_table, to_table
def read_ecsv(
filename, index=None, *, move_to_meta=False, cosmology=None, rename=None, **kwargs
):
"""Read a `~astropy.cosmology.Cosmology` from an ECSV file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str or `~astropy.cosmology.Cosmology` class or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in the table.
rename : dict or None (optional keyword-only)
A dictionary mapping column names to fields of the
`~astropy.cosmology.Cosmology`.
**kwargs
Passed to :attr:`astropy.table.QTable.read`
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
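Examples
--------
A minimal sketch of reading back a cosmology written with ``write_ecsv``;
the filename is illustrative:
>>> from astropy.cosmology import Cosmology
>>> cosmo = Cosmology.read("example_cosmology.ecsv", format="ascii.ecsv")  # doctest: +SKIP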
"""
kwargs["format"] = "ascii.ecsv"
with u.add_enabled_units(cu):
table = QTable.read(filename, **kwargs)
# build cosmology from table
return from_table(
table,
index=index,
move_to_meta=move_to_meta,
cosmology=cosmology,
rename=rename,
)
def write_ecsv(
cosmology,
file,
*,
overwrite=False,
cls=QTable,
cosmology_in_meta=True,
rename=None,
**kwargs
):
"""Serialize the cosmology into a ECSV.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`
The cosmology instance to convert to a mapping.
file : path-like or file-like
Location to save the serialized cosmology.
overwrite : bool
Whether to overwrite the file, if it exists.
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` (sub)class to use when writing.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool (optional, keyword-only)
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
rename : dict or None (optional keyword-only)
A dictionary mapping fields of the `~astropy.cosmology.Cosmology` to
columns of the table.
**kwargs
Passed to ``cls.write``
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
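Examples
--------
A minimal sketch of writing ``Planck18`` to an ECSV file; the filename is
illustrative:
>>> from astropy.cosmology import Planck18
>>> Planck18.write("example_cosmology.ecsv", format="ascii.ecsv")  # doctest: +SKIP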
"""
table = to_table(
cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta, rename=rename
)
kwargs["format"] = "ascii.ecsv"
table.write(file, overwrite=overwrite, **kwargs)
def ecsv_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
return filepath is not None and filepath.endswith(".ecsv")
# ===================================================================
# Register
readwrite_registry.register_reader("ascii.ecsv", Cosmology, read_ecsv)
readwrite_registry.register_writer("ascii.ecsv", Cosmology, write_ecsv)
readwrite_registry.register_identifier("ascii.ecsv", Cosmology, ecsv_identify)
|
cd6dbdcaaf0c8cdc957222756ef632ffeb2c2f8462edf60f54dca33296fb70e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for :mod:`astropy.cosmology.comparison`"""
import re
import numpy as np
import pytest
from astropy.cosmology import Cosmology, FlatCosmologyMixin, Planck18, cosmology_equal
from astropy.cosmology._io.tests.base import ToFromTestMixinBase
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.funcs.comparison import (
_cosmology_not_equal,
_CosmologyWrapper,
_parse_format,
_parse_formats,
)
class ComparisonFunctionTestBase(ToFromTestMixinBase):
"""Tests for cosmology comparison functions.
This class inherits from
`astropy.cosmology._io.tests.base.ToFromTestMixinBase` because the cosmology
comparison functions all have a kwarg ``format`` that allow the arguments to
be converted to a |Cosmology| using the ``to_format`` architecture.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must be
inherited in a subclass.
"""
@pytest.fixture(scope="class")
def cosmo(self):
return Planck18
@pytest.fixture(scope="class")
def cosmo_eqvxflat(self, cosmo):
if isinstance(cosmo, FlatCosmologyMixin):
return cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(
scope="class",
params={k for k, _ in convert_registry._readers.keys()} - {"astropy.cosmology"},
)
def format(self, request):
return request.param
@pytest.fixture(scope="class")
def xfail_cant_autoidentify(self, format):
"""`pytest.fixture` form of method ``can_autodentify``."""
if not self.can_autodentify(format):
pytest.xfail("cannot autoidentify")
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
return to_format(format)
@pytest.fixture(scope="class")
def pert_cosmo(self, cosmo):
# change one parameter
p = cosmo.__parameters__[0]
v = getattr(cosmo, p)
cosmo2 = cosmo.clone(
**{p: v * 1.0001 if v != 0 else 0.001 * getattr(v, "unit", 1)}
)
return cosmo2
@pytest.fixture(scope="class")
def pert_cosmo_eqvxflat(self, pert_cosmo):
if isinstance(pert_cosmo, FlatCosmologyMixin):
return pert_cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(scope="class")
def pert_converted(self, pert_cosmo, format):
if format == "astropy.model": # special case Model
return pert_cosmo.to_format(format, method="comoving_distance")
return pert_cosmo.to_format(format)
class Test_parse_format(ComparisonFunctionTestBase):
"""Test functions ``_parse_format``."""
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
converted = to_format(format)
# Some raise a segfault! TODO: figure out why
if isinstance(converted, _CosmologyWrapper._cantbroadcast):
converted = _CosmologyWrapper(converted)
return converted
# ========================================================================
def test_shortcut(self, cosmo):
"""Test the already-a-cosmology shortcut."""
# A Cosmology
for fmt in {None, True, False, "astropy.cosmology"}:
assert _parse_format(cosmo, fmt) is cosmo, f"{fmt} failed"
# A Cosmology, but improperly formatted
# see ``test_parse_format_error_wrong_format``.
def test_convert(self, converted, format, cosmo):
"""Test converting a cosmology-like object"""
out = _parse_format(converted, format)
assert isinstance(out, Cosmology)
assert out == cosmo
def test_parse_format_error_wrong_format(self, cosmo):
"""
Test ``_parse_format`` errors when given a Cosmology object and format
is not compatible.
"""
with pytest.raises(
ValueError, match=re.escape("for parsing a Cosmology, 'format'")
):
_parse_format(cosmo, "mapping")
def test_parse_format_error_noncosmology_cant_convert(self):
"""
Test ``_parse_format`` errors when given a non-Cosmology object
and format is `False`.
"""
notacosmo = object()
with pytest.raises(TypeError, match=re.escape("if 'format' is False")):
_parse_format(notacosmo, False)
def test_parse_format_vectorized(self, cosmo, format, converted):
# vectorized on cosmos
out = _parse_format([cosmo, cosmo], None)
assert len(out) == 2
assert np.all(out == cosmo)
# vectorized on formats
out = _parse_format(cosmo, [None, None])
assert len(out) == 2
assert np.all(out == cosmo)
# more complex broadcast
out = _parse_format(
[[cosmo, converted], [converted, cosmo]], [[None, format], [format, None]]
)
assert out.shape == (2, 2)
assert np.all(out == cosmo)
def test_parse_formats_vectorized(self, cosmo):
# vectorized on cosmos
out = _parse_formats(cosmo, cosmo, format=None)
assert len(out) == 2
assert np.all(out == cosmo)
# does NOT vectorize on formats
with pytest.raises(ValueError, match="operands could not be broadcast"):
_parse_formats(cosmo, format=[None, None])
class Test_cosmology_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison.cosmology_equal`"""
def test_cosmology_equal_simple(self, cosmo, pert_cosmo):
# equality
assert cosmology_equal(cosmo, cosmo) is True
# not equal to perturbed cosmology
assert cosmology_equal(cosmo, pert_cosmo) is False
def test_cosmology_equal_equivalent(
self, cosmo, cosmo_eqvxflat, pert_cosmo, pert_cosmo_eqvxflat
):
# now need to check equivalent, but not equal, cosmologies.
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is False
assert (
cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True)
is True
)
assert (
cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False)
is False
)
def test_cosmology_equal_too_many_cosmo(self, cosmo):
with pytest.raises(
TypeError, match="cosmology_equal takes 2 positional arguments"
):
cosmology_equal(cosmo, cosmo, cosmo)
def test_cosmology_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted)
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted, format=False)
def test_cosmology_equal_format_auto(
self, cosmo, converted, xfail_cant_autoidentify
):
# These tests only run if the format can autoidentify.
assert cosmology_equal(cosmo, converted, format=None) is True
assert cosmology_equal(cosmo, converted, format=True) is True
def test_cosmology_equal_format_specify(
self, cosmo, format, converted, pert_converted
):
# equality
assert cosmology_equal(cosmo, converted, format=[None, format]) is True
assert cosmology_equal(converted, cosmo, format=[format, None]) is True
# non-equality
assert cosmology_equal(cosmo, pert_converted, format=[None, format]) is False
def test_cosmology_equal_equivalent_format_specify(
self, cosmo, format, converted, cosmo_eqvxflat
):
# specifying the format
assert (
cosmology_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True
)
is True
)
assert (
cosmology_equal(
converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True
)
is True
)
class Test_cosmology_not_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison._cosmology_not_equal`"""
def test_cosmology_not_equal_simple(self, cosmo, pert_cosmo):
# equality
assert _cosmology_not_equal(cosmo, cosmo) is False
# not equal to perturbed cosmology
assert _cosmology_not_equal(cosmo, pert_cosmo) is True
def test_cosmology_not_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="_cosmology_not_equal takes 2 positional"):
_cosmology_not_equal(cosmo, cosmo, cosmo)
def test_cosmology_not_equal_equivalent(
self, cosmo, cosmo_eqvxflat, pert_cosmo, pert_cosmo_eqvxflat
):
# now need to check equivalent, but not equal, cosmologies.
assert (
_cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is True
)
assert (
_cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is False
)
assert (
_cosmology_not_equal(
pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False
)
is True
)
assert (
_cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True)
is False
)
def test_cosmology_not_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted)
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted, format=False)
def test_cosmology_not_equal_format_auto(
self, cosmo, pert_converted, xfail_cant_autoidentify
):
assert _cosmology_not_equal(cosmo, pert_converted, format=None) is True
assert _cosmology_not_equal(cosmo, pert_converted, format=True) is True
def test_cosmology_not_equal_format_specify(
self, cosmo, format, converted, pert_converted
):
# specifying the format
assert (
_cosmology_not_equal(cosmo, pert_converted, format=[None, format]) is True
)
assert (
_cosmology_not_equal(pert_converted, cosmo, format=[format, None]) is True
)
# equality
assert _cosmology_not_equal(cosmo, converted, format=[None, format]) is False
def test_cosmology_not_equal_equivalent_format_specify(
self, cosmo, format, converted, cosmo_eqvxflat
):
# specifying the format
assert (
_cosmology_not_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=False
)
is True
)
assert (
_cosmology_not_equal(
cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True
)
is False
)
assert (
_cosmology_not_equal(
converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True
)
is False
)
|